diff --git a/CHANGELOG.md b/CHANGELOG.md
index 093b440cfd9..d4df7f15542 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,31 @@
 * [CHANGE] Ingester: the configuration parameter `-blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup` has been deprecated and will be removed in Mimir 2.10. #4445
 * [CHANGE] Ingester: the `cortex_ingester_tsdb_wal_replay_duration_seconds` metrics has been removed. #4465
 * [FEATURE] Cache: Introduce experimental support for using Redis for results, chunks, index, and metadata caches. #4371
+* [FEATURE] Vault: Introduce experimental integration with Vault to fetch secrets used to configure TLS for clients. Server TLS secrets will still be read from a file. `tls-ca-path`, `tls-cert-path` and `tls-key-path` will denote the path in Vault for the following CLI flags when `-vault.enabled` is true: #4446.
+  * `-distributor.ha-tracker.etcd.*`
+  * `-distributor.ring.etcd.*`
+  * `-distributor.forwarding.grpc-client.*`
+  * `-querier.store-gateway-client.*`
+  * `-ingester.client.*`
+  * `-ingester.ring.etcd.*`
+  * `-querier.frontend-client.*`
+  * `-query-frontend.grpc-client-config.*`
+  * `-query-frontend.results-cache.redis.*`
+  * `-blocks-storage.bucket-store.index-cache.redis.*`
+  * `-blocks-storage.bucket-store.chunks-cache.redis.*`
+  * `-blocks-storage.bucket-store.metadata-cache.redis.*`
+  * `-compactor.ring.etcd.*`
+  * `-store-gateway.sharding-ring.etcd.*`
+  * `-ruler.client.*`
+  * `-ruler.alertmanager-client.*`
+  * `-ruler.ring.etcd.*`
+  * `-ruler.query-frontend.grpc-client-config.*`
+  * `-alertmanager.sharding-ring.etcd.*`
+  * `-alertmanager.alertmanager-client.*`
+  * `-memberlist.*`
+  * `-query-scheduler.grpc-client-config.*`
+  * `-query-scheduler.ring.etcd.*`
+  * `-overrides-exporter.ring.etcd.*`
 * [ENHANCEMENT] Allow to define service name used for tracing via `JAEGER_SERVICE_NAME` environment variable. #4394
 * [ENHANCEMENT] Querier and query-frontend: add experimental, more performant protobuf query result response format enabled with `-query-frontend.query-result-response-format=protobuf`. #4304 #4318 #4375
 * [ENHANCEMENT] Compactor: added experimental configuration parameter `-compactor.first-level-compaction-wait-period`, to configure how long the compactor should wait before compacting 1st level blocks (uploaded by ingesters). This configuration option allows to reduce the chances compactor begins compacting blocks before all ingesters have uploaded their blocks to the storage.
#4401 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 6d82c238308..e2ba2707a8a 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -8438,6 +8438,60 @@ "fieldValue": null, "fieldDefaultValue": null }, + { + "kind": "block", + "name": "vault", + "required": false, + "desc": "", + "blockEntries": [ + { + "kind": "field", + "name": "enabled", + "required": false, + "desc": "Enables fetching of keys and certificates from Vault", + "fieldValue": null, + "fieldDefaultValue": false, + "fieldFlag": "vault.enabled", + "fieldType": "boolean", + "fieldCategory": "experimental" + }, + { + "kind": "field", + "name": "url", + "required": false, + "desc": "Location of the Vault server", + "fieldValue": null, + "fieldDefaultValue": "", + "fieldFlag": "vault.url", + "fieldType": "string", + "fieldCategory": "experimental" + }, + { + "kind": "field", + "name": "token", + "required": false, + "desc": "Token used to authenticate with Vault", + "fieldValue": null, + "fieldDefaultValue": "", + "fieldFlag": "vault.token", + "fieldType": "string", + "fieldCategory": "experimental" + }, + { + "kind": "field", + "name": "mount_path", + "required": false, + "desc": "Location of secrets engine within Vault", + "fieldValue": null, + "fieldDefaultValue": "", + "fieldFlag": "vault.mount-path", + "fieldType": "string", + "fieldCategory": "experimental" + } + ], + "fieldValue": null, + "fieldDefaultValue": null + }, { "kind": "block", "name": "ruler", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 0cc8129f459..669889e6101 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -2289,5 +2289,13 @@ Usage of ./cmd/mimir/mimir: Maximum length accepted for metric metadata. Metadata refers to Metric Name, HELP and UNIT. Longer metadata is dropped except for HELP which is truncated. (default 1024) -validation.separate-metrics-group-label string [experimental] Label used to define the group label for metrics separation. For each write request, the group is obtained from the first non-empty group label from the first timeseries in the incoming list of timeseries. Specific distributor and ingester metrics will be further separated adding a 'group' label with group label's value. Currently applies to the following metrics: cortex_discarded_samples_total + -vault.enabled + [experimental] Enables fetching of keys and certificates from Vault + -vault.mount-path string + [experimental] Location of secrets engine within Vault + -vault.token string + [experimental] Token used to authenticate with Vault + -vault.url string + [experimental] Location of the Vault server -version Print application version and exit. 
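The changelog entry, config descriptor, and CLI help above describe the new experimental Vault integration. As a quick orientation before the Go changes further down in this diff, here is a minimal sketch (not part of the change set) of how the new `pkg/vault` package could be exercised on its own; the Vault address, token, mount path, and secret path are illustrative assumptions rather than values taken from this PR.

```go
package main

import (
	"fmt"
	"log"

	"github.com/grafana/mimir/pkg/vault"
)

func main() {
	// Hypothetical settings; in Mimir these come from the -vault.* CLI flags or
	// the `vault:` YAML block documented in this diff.
	cfg := vault.Config{
		Enabled:   true,
		URL:       "https://vault.example.com:8200", // assumed server location
		Token:     "s.example-token",                // assumed auth token
		MountPath: "secret",                         // assumed KV v2 mount path
	}
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid vault config: %v", err)
	}

	v, err := vault.NewVault(cfg)
	if err != nil {
		log.Fatalf("creating vault client: %v", err)
	}

	// With -vault.enabled, TLS flags such as -ingester.client.tls-cert-path are
	// interpreted as Vault paths; ReadSecret returns the "value" field of the
	// KV v2 secret stored at that path.
	pem, err := v.ReadSecret("mimir/tls-cert") // hypothetical path
	if err != nil {
		log.Fatalf("reading secret: %v", err)
	}
	fmt.Printf("fetched %d bytes of certificate material\n", len(pem))
}
```

This only restates the flow implemented by the `pkg/vault/vault.go` file added later in this diff.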
diff --git a/docs/sources/mimir/configure/about-versioning.md b/docs/sources/mimir/configure/about-versioning.md index ee2e6ba4ab7..33b2e4ae1ba 100644 --- a/docs/sources/mimir/configure/about-versioning.md +++ b/docs/sources/mimir/configure/about-versioning.md @@ -116,6 +116,7 @@ The following features are currently experimental: - `-query-frontend.query-result-response-format=protobuf` - `-ruler.query-frontend.query-result-response-format=protobuf` - Per-tenant Results cache TTL (`-query-frontend.results-cache-ttl`, `-query-frontend.results-cache-ttl-for-out-of-order-time-window`) +- Fetching TLS secrets from Vault for various clients (`-vault.enabled`) ## Deprecated features diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index 33869570502..c4b3b4c7fd2 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -202,6 +202,23 @@ activity_tracker: # CLI flag: -activity-tracker.max-entries [max_entries: | default = 1024] +vault: + # (experimental) Enables fetching of keys and certificates from Vault + # CLI flag: -vault.enabled + [enabled: | default = false] + + # (experimental) Location of the Vault server + # CLI flag: -vault.url + [url: | default = ""] + + # (experimental) Token used to authenticate with Vault + # CLI flag: -vault.token + [token: | default = ""] + + # (experimental) Location of secrets engine within Vault + # CLI flag: -vault.mount-path + [mount_path: | default = ""] + # The ruler block configures the ruler. [ruler: ] diff --git a/go.mod b/go.mod index ec512da70ab..c51546f08fb 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 - github.com/opentracing/opentracing-go v1.2.0 + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.1-0.20230119163903-f59460bfd4bf github.com/prometheus/client_golang v1.14.0 @@ -59,6 +59,7 @@ require ( github.com/google/uuid v1.3.0 github.com/grafana-tools/sdk v0.0.0-20211220201350-966b3088eec9 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd + github.com/hashicorp/vault/api v1.9.0 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.69.0 @@ -74,17 +75,31 @@ require ( sigs.k8s.io/kustomize/kyaml v0.13.7 ) +require ( + github.com/Azure/azure-sdk-for-go v67.2.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect + github.com/go-test/deep v1.1.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // 
indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect +) + require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.12.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect - github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -109,7 +124,7 @@ require ( github.com/chromedp/chromedp v0.8.2 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dlclark/regexp2 v1.4.0 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect @@ -120,7 +135,7 @@ require ( github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-errors/errors v1.0.1 // indirect + github.com/go-errors/errors v1.4.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -150,7 +165,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.4.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-msgpack v1.1.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect @@ -163,7 +178,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.15.9 // indirect + github.com/klauspost/compress v1.15.15 // indirect github.com/klauspost/cpuid/v2 v2.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect diff --git a/go.sum b/go.sum index 54bf45f47f2..f8711a06bcc 100644 --- a/go.sum +++ b/go.sum @@ -69,13 +69,14 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw= 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k= +github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -86,8 +87,8 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= -github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -164,6 +165,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= github.com/cenkalti/backoff/v4 
v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -208,8 +211,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -262,8 +266,9 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= +github.com/go-errors/errors v1.4.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -331,6 +336,8 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -536,6 +543,7 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= @@ -544,25 +552,33 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version 
v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -572,6 +588,8 @@ github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= @@ -581,6 +599,8 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= +github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -620,8 +640,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= @@ -749,8 +769,8 @@ github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.m github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -830,6 +850,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1288,6 +1310,7 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1531,6 +1554,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/telebot.v3 v3.1.2 h1:uw3zobPBnexytTsIPyxsS10xHRLXCf5f2GQhBxp6NaU= gopkg.in/telebot.v3 v3.1.2/go.mod h1:GJKwwWqp9nSkIVN51eRKU78aB5f5OnQuWdwiIZfPbko= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= diff --git a/pkg/mimir/mimir.go b/pkg/mimir/mimir.go index 3801d496263..f43d4e65d63 100644 --- a/pkg/mimir/mimir.go +++ b/pkg/mimir/mimir.go @@ -72,6 +72,7 @@ import ( "github.com/grafana/mimir/pkg/util/process" "github.com/grafana/mimir/pkg/util/validation" 
"github.com/grafana/mimir/pkg/util/validation/exporter" + "github.com/grafana/mimir/pkg/vault" ) var errInvalidBucketConfig = errors.New("invalid bucket config") @@ -118,6 +119,7 @@ type Config struct { StoreGateway storegateway.Config `yaml:"store_gateway"` TenantFederation tenantfederation.Config `yaml:"tenant_federation"` ActivityTracker activitytracker.Config `yaml:"activity_tracker"` + Vault vault.Config `yaml:"vault"` Ruler ruler.Config `yaml:"ruler"` RulerStorage rulestore.Config `yaml:"ruler_storage"` @@ -169,6 +171,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { c.Ruler.RegisterFlags(f, logger) c.RulerStorage.RegisterFlags(f, logger) + c.Vault.RegisterFlags(f) c.Alertmanager.RegisterFlags(f, logger) c.AlertmanagerStorage.RegisterFlags(f, logger) c.RuntimeConfig.RegisterFlags(f) @@ -261,6 +264,9 @@ func (c *Config) Validate(log log.Logger) error { if err := c.UsageStats.Validate(); err != nil { return errors.Wrap(err, "invalid usage stats config") } + if err := c.Vault.Validate(); err != nil { + return errors.Wrap(err, "invalid vault config") + } if c.isAnyModuleEnabled(AlertManager, Backend) { if err := c.Alertmanager.Validate(); err != nil { return errors.Wrap(err, "invalid alertmanager config") @@ -669,6 +675,7 @@ type Mimir struct { StoreGateway *storegateway.StoreGateway MemberlistKV *memberlist.KVInitService ActivityTracker *activitytracker.ActivityTracker + Vault *vault.Vault UsageStatsReporter *usagestats.Reporter BuildInfoHandler http.Handler diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 4729ae9462f..a3b4ef85c37 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -57,6 +57,7 @@ import ( "github.com/grafana/mimir/pkg/util/validation" "github.com/grafana/mimir/pkg/util/validation/exporter" "github.com/grafana/mimir/pkg/util/version" + "github.com/grafana/mimir/pkg/vault" ) // The various modules that make up Mimir. 
@@ -87,6 +88,7 @@ const ( StoreGateway string = "store-gateway" MemberlistKV string = "memberlist-kv" QueryScheduler string = "query-scheduler" + Vault string = "vault" TenantFederation string = "tenant-federation" UsageStats string = "usage-stats" All string = "all" @@ -167,6 +169,48 @@ func (t *Mimir) initActivityTracker() (services.Service, error) { }), nil } +func (t *Mimir) initVault() (services.Service, error) { + if !t.Cfg.Vault.Enabled { + return nil, nil + } + + vault, err := vault.NewVault(t.Cfg.Vault) + if err != nil { + return nil, err + } + t.Vault = vault + + // Update Configs - KVStore + t.Cfg.MemberlistKV.TCPTransport.TLS.Reader = t.Vault + t.Cfg.Distributor.HATrackerConfig.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.Alertmanager.ShardingRing.Common.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.Compactor.ShardingRing.Common.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.Distributor.DistributorRing.Common.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.Ingester.IngesterRing.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.Ruler.Ring.Common.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.StoreGateway.ShardingRing.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.QueryScheduler.ServiceDiscovery.SchedulerRing.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + t.Cfg.OverridesExporter.Ring.Common.KVStore.StoreConfig.Etcd.TLS.Reader = t.Vault + + // Update Configs - Redis Clients + t.Cfg.BlocksStorage.BucketStore.IndexCache.BackendConfig.Redis.TLS.Reader = t.Vault + t.Cfg.BlocksStorage.BucketStore.ChunksCache.BackendConfig.Redis.TLS.Reader = t.Vault + t.Cfg.BlocksStorage.BucketStore.MetadataCache.BackendConfig.Redis.TLS.Reader = t.Vault + t.Cfg.Frontend.QueryMiddleware.ResultsCacheConfig.BackendConfig.Redis.TLS.Reader = t.Vault + + // Update Configs - GRPC Clients + t.Cfg.IngesterClient.GRPCClientConfig.TLS.Reader = t.Vault + t.Cfg.Worker.GRPCClientConfig.TLS.Reader = t.Vault + t.Cfg.Querier.StoreGatewayClient.TLS.Reader = t.Vault + t.Cfg.Frontend.FrontendV2.GRPCClientConfig.TLS.Reader = t.Vault + t.Cfg.Ruler.ClientTLSConfig.TLS.Reader = t.Vault + t.Cfg.Ruler.Notifier.TLS.Reader = t.Vault + t.Cfg.Alertmanager.AlertmanagerClient.GRPCClientConfig.TLS.Reader = t.Vault + t.Cfg.QueryScheduler.GRPCClientConfig.TLS.Reader = t.Vault + + return nil, nil +} + func (t *Mimir) initSanityCheck() (services.Service, error) { return services.NewIdleService(func(ctx context.Context) error { return runSanityCheck(ctx, t.Cfg, util_log.Logger) @@ -867,6 +911,7 @@ func (t *Mimir) setupModuleManager() error { mm.RegisterModule(QueryScheduler, t.initQueryScheduler) mm.RegisterModule(TenantFederation, t.initTenantFederation, modules.UserInvisibleModule) mm.RegisterModule(UsageStats, t.initUsageStats, modules.UserInvisibleModule) + mm.RegisterModule(Vault, t.initVault, modules.UserInvisibleModule) mm.RegisterModule(Write, nil) mm.RegisterModule(Read, nil) mm.RegisterModule(Backend, nil) @@ -876,27 +921,27 @@ func (t *Mimir) setupModuleManager() error { deps := map[string][]string{ Server: {ActivityTracker, SanityCheck, UsageStats}, API: {Server}, - MemberlistKV: {API}, + MemberlistKV: {API, Vault}, RuntimeConfig: {API}, - Ring: {API, RuntimeConfig, MemberlistKV}, + Ring: {API, RuntimeConfig, MemberlistKV, Vault}, Overrides: {RuntimeConfig}, - OverridesExporter: {Overrides, MemberlistKV}, - Distributor: {DistributorService, API, ActiveGroupsCleanupService}, - DistributorService: {Ring, Overrides}, - Ingester: {IngesterService, API, ActiveGroupsCleanupService}, + 
OverridesExporter: {Overrides, MemberlistKV, Vault}, + Distributor: {DistributorService, API, ActiveGroupsCleanupService, Vault}, + DistributorService: {Ring, Overrides, Vault}, + Ingester: {IngesterService, API, ActiveGroupsCleanupService, Vault}, IngesterService: {Overrides, RuntimeConfig, MemberlistKV}, Flusher: {Overrides, API}, Queryable: {Overrides, DistributorService, Ring, API, StoreQueryable, MemberlistKV}, - Querier: {TenantFederation}, + Querier: {TenantFederation, Vault}, StoreQueryable: {Overrides, MemberlistKV}, QueryFrontendTripperware: {API, Overrides}, - QueryFrontend: {QueryFrontendTripperware, MemberlistKV}, - QueryScheduler: {API, Overrides, MemberlistKV}, - Ruler: {DistributorService, StoreQueryable, RulerStorage}, + QueryFrontend: {QueryFrontendTripperware, MemberlistKV, Vault}, + QueryScheduler: {API, Overrides, MemberlistKV, Vault}, + Ruler: {DistributorService, StoreQueryable, RulerStorage, Vault}, RulerStorage: {Overrides}, - AlertManager: {API, MemberlistKV, Overrides}, - Compactor: {API, MemberlistKV, Overrides}, - StoreGateway: {API, Overrides, MemberlistKV}, + AlertManager: {API, MemberlistKV, Overrides, Vault}, + Compactor: {API, MemberlistKV, Overrides, Vault}, + StoreGateway: {API, Overrides, MemberlistKV, Vault}, TenantFederation: {Queryable}, Write: {Distributor, Ingester}, Read: {QueryFrontend, Querier}, diff --git a/pkg/vault/vault.go b/pkg/vault/vault.go new file mode 100644 index 00000000000..5c4a6ba16e5 --- /dev/null +++ b/pkg/vault/vault.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package vault + +import ( + "context" + "errors" + "flag" + "fmt" + + hashivault "github.com/hashicorp/vault/api" +) + +// Config for the Vault used to fetch secrets +type Config struct { + Enabled bool `yaml:"enabled" category:"experimental"` + + URL string `yaml:"url" category:"experimental"` + Token string `yaml:"token" category:"experimental"` + MountPath string `yaml:"mount_path" category:"experimental"` +} + +func (cfg *Config) Validate() error { + if !cfg.Enabled { + return nil + } + + if cfg.URL == "" { + return errors.New("empty vault URL supplied") + } + + if cfg.Token == "" { + return errors.New("empty vault authentication token supplied") + } + + if cfg.MountPath == "" { + return errors.New("empty vault mount path supplied") + } + + return nil +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&cfg.Enabled, "vault.enabled", false, "Enables fetching of keys and certificates from Vault") + f.StringVar(&cfg.URL, "vault.url", "", "Location of the Vault server") + f.StringVar(&cfg.Token, "vault.token", "", "Token used to authenticate with Vault") + f.StringVar(&cfg.MountPath, "vault.mount-path", "", "Location of secrets engine within Vault") +} + +type SecretsEngine interface { + Get(ctx context.Context, path string) (*hashivault.KVSecret, error) +} + +type Vault struct { + KVStore SecretsEngine +} + +func NewVault(cfg Config) (*Vault, error) { + config := hashivault.DefaultConfig() + config.Address = cfg.URL + + client, err := hashivault.NewClient(config) + if err != nil { + return nil, err + } + + client.SetToken(cfg.Token) + vault := &Vault{ + KVStore: client.KVv2(cfg.MountPath), + } + + return vault, nil +} + +func (v *Vault) ReadSecret(path string) ([]byte, error) { + secret, err := v.KVStore.Get(context.Background(), path) + + if err != nil { + return nil, fmt.Errorf("unable to read secret from vault: %v", err) + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("secret data is nil") + } + + data, 
ok := secret.Data["value"].(string) + if !ok { + return nil, fmt.Errorf("secret data type is not string, found %T value: %#v", secret.Data["value"], secret.Data["value"]) + } + + return []byte(data), nil +} diff --git a/pkg/vault/vault_test.go b/pkg/vault/vault_test.go new file mode 100644 index 00000000000..0877a4cfaa6 --- /dev/null +++ b/pkg/vault/vault_test.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package vault + +import ( + "context" + "errors" + "testing" + + hashivault "github.com/hashicorp/vault/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReadSecret(t *testing.T) { + mockKVStore := newMockKVStore() + mimirVaultClient := Vault{ + KVStore: mockKVStore, + } + + tests := map[string]struct { + path string + expectError bool + expectedValue string + errorMsg string + }{ + "read secret1 with no error": { + path: "test/secret1", + expectError: false, + expectedValue: "foo1", + }, + "read secret2 with no error": { + path: "test/secret2", + expectError: false, + expectedValue: "foo2", + }, + "read secret with non-existent path": { + path: "test/secret3", + expectError: true, + errorMsg: "unable to read secret from vault", + }, + "secret returned is nil": { + path: "test/secret4", + expectError: true, + errorMsg: "secret data is nil", + }, + "secret data is not a string": { + path: "test/secret5", + expectError: true, + errorMsg: "secret data type is not string, found int value: 123", + }, + "secret data is nil": { + path: "test/secret6", + expectError: true, + errorMsg: "secret data type is not string, found value: ", + }, + } + + for testName, testCase := range tests { + t.Run(testName, func(t *testing.T) { + secret, err := mimirVaultClient.ReadSecret(testCase.path) + if testCase.expectError { + require.Error(t, err) + require.ErrorContains(t, err, testCase.errorMsg) + } else { + require.NoError(t, err) + assert.Equal(t, testCase.expectedValue, string(secret)) + } + }) + } +} + +type mockKVStore struct { + values map[string]mockValue +} + +type mockValue struct { + secret *hashivault.KVSecret + err error +} + +func newMockKVStore() *mockKVStore { + return &mockKVStore{ + values: map[string]mockValue{ + "test/secret1": { + secret: &hashivault.KVSecret{ + Data: map[string]interface{}{ + "value": "foo1", + }, + }, + err: nil, + }, + "test/secret2": { + secret: &hashivault.KVSecret{ + Data: map[string]interface{}{ + "value": "foo2", + }, + }, + err: nil, + }, + "test/secret3": { + secret: nil, + err: errors.New("non-existent path"), + }, + "test/secret4": { + secret: nil, + err: nil, + }, + "test/secret5": { + secret: &hashivault.KVSecret{ + Data: map[string]interface{}{ + "value": 123, + }, + }, + err: nil, + }, + "test/secret6": { + secret: &hashivault.KVSecret{ + Data: map[string]interface{}{ + "value": nil, + }, + }, + err: nil, + }, + }, + } +} + +func (m *mockKVStore) Get(ctx context.Context, path string) (*hashivault.KVSecret, error) { + return m.values[path].secret, m.values[path].err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 80321d29a9b..c2176107e04 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,21 @@ # Release History +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. 
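Stepping back briefly before the vendored Azure SDK changes that start here: the new `pkg/vault` package above exposes `ReadSecret`, and `initVault` in `pkg/mimir/modules.go` assigns the `*vault.Vault` instance to the `TLS.Reader` field of each client config. The compile-time sketch below is an illustration only; the `SecretReader` interface is an assumed stand-in for the reader interface those TLS configs actually accept (it is defined outside this diff), while the `ReadSecret` signature matches the code above.

```go
package main

import (
	"fmt"

	"github.com/grafana/mimir/pkg/vault"
)

// SecretReader is an assumed stand-in for the interface behind the TLS.Reader
// fields that initVault populates; the real interface lives outside this diff.
type SecretReader interface {
	ReadSecret(path string) ([]byte, error)
}

// *vault.Vault satisfies such an interface via the ReadSecret method added in
// pkg/vault/vault.go, which is what lets one client serve the etcd, Redis,
// gRPC, and memberlist TLS configs alike.
var _ SecretReader = (*vault.Vault)(nil)

func main() {
	fmt.Println("*vault.Vault can act as a TLS secret reader")
}
```

Note that `initVault` returns early when `-vault.enabled` is false, leaving the `Reader` fields unset so TLS material continues to be read from files.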
+ +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + ## 1.2.0 (2022-11-04) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index f9fb23422df..72c2cf21eef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -7,25 +7,20 @@ package azcore import ( - "context" "reflect" - "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // AccessToken represents an Azure service bearer access token with expiry information. -type AccessToken struct { - Token string - ExpiresOn time.Time -} +type AccessToken = exported.AccessToken // TokenCredential represents a credential capable of providing an OAuth token. -type TokenCredential interface { - // GetToken requests an access token for the specified set of scopes. - GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error) -} +type TokenCredential = exported.TokenCredential // holds sentinel values used to send nulls var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} @@ -73,3 +68,46 @@ func IsNullValue[T any](v T) bool { // ClientOptions contains configuration settings for a client's pipeline. type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer +} + +// NewClient creates a new Client instance with the provided values. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + pkg, err := shared.ExtractPackageName(clientName) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(clientName, moduleVersion) + return &Client{pl: pl, tr: tr}, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. 
+func (c *Client) Tracer() tracing.Tracer { + return c.tr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index 6e029d493ce..2ffbc0e4741 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -7,8 +7,10 @@ package exported import ( + "context" "io" "net/http" + "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" ) @@ -58,3 +60,24 @@ func Payload(resp *http.Response) ([]byte, error) { resp.Body = shared.NewNopClosingBytesReader(bytesBody) return bytesBody, nil } + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 4aeec158937..38867574178 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -100,32 +100,47 @@ func (req *Request) OperationValue(value interface{}) bool { return req.values.get(value) } -// SetBody sets the specified ReadSeekCloser as the HTTP request body. +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { - // Set the body and content length. 
- size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return err + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } } if size == 0 { - body.Close() - return nil - } - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return err + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } } - req.Raw().GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream - return body, err - } - // keep a copy of the original body. this is to handle cases + // keep a copy of the body argument. this is to handle cases // where req.Body is replaced, e.g. httputil.DumpRequest and friends. req.body = body req.req.Body = body req.req.ContentLength = size - req.req.Header.Set(shared.HeaderContentType, contentType) - req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else { + req.req.Header.Set(shared.HeaderContentType, contentType) + } return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 75d241c5b42..0fe2e2cb7fd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -30,5 +30,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.2.0" + Version = "v1.3.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index 96eef2956ff..7c71df30700 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -9,10 +9,13 @@ package shared import ( "context" "errors" + "fmt" "io" "net/http" "reflect" + "regexp" "strconv" + "strings" "time" ) @@ -133,3 +136,24 @@ type TransportFunc func(*http.Request) (*http.Response, error) func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { return pf(req) } + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ExtractPackageName returns "package" from "package.Client". +// If clientName is malformed, an error is returned. 
+func ExtractPackageName(clientName string) (string, error) { + pkg, client, ok := strings.Cut(clientName, ".") + if !ok { + return "", fmt.Errorf("missing . in clientName %s", clientName) + } else if pkg == "" || client == "" { + return "", fmt.Errorf("malformed clientName %s", clientName) + } + return pkg, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index 27c30229883..c427e14d88c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -7,6 +7,7 @@ package policy import ( + "net/http" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" @@ -125,12 +126,30 @@ type TelemetryOptions struct { } // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. -type TokenRequestOptions struct { - // Scopes contains the list of permission scopes required for the token. - Scopes []string -} +type TokenRequestOptions = exported.TokenRequestOptions // BearerTokenOptions configures the bearer token policy's behavior. type BearerTokenOptions struct { - // placeholder for future options + // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. + // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from + // its given credential. + AuthorizationHandler AuthorizationHandler +} + +// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. +type AuthorizationHandler struct { + // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token + // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't + // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a + // token from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the + // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible + // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's + // given credential. Implementations that need to perform I/O should use the Request's context, available from + // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, + // the policy will return any 401 response to the client. 
+ OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index 71e3062be0b..b61e4c121f6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -4,35 +4,39 @@ package runtime import ( + "errors" "net/http" "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" ) // BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. type BearerTokenPolicy struct { // mainResource is the resource to be retreived using the tenant specified in the credential - mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] // the following fields are read-only - cred azcore.TokenCredential - scopes []string + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string } type acquiringResourceState struct { req *policy.Request p *BearerTokenPolicy + tro policy.TokenRequestOptions } // acquire acquires or updates the resource; only one // thread/goroutine at a time ever calls this function -func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { - tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes}) +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) if err != nil { - return azcore.AccessToken{}, time.Time{}, err + return exported.AccessToken{}, time.Time{}, err } return tk, tk.ExpiresOn, nil } @@ -41,24 +45,72 @@ func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newE // cred: an azcore.TokenCredential implementation such as a credential object from azidentity // scopes: the list of permission scopes required for the token. // opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. 
-func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { +func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), } } +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + // Do authorizes a request with a bearer token func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { - as := acquiringResourceState{ - p: b, - req: req, + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) } - tk, err := b.mainResource.Get(as) + if err != nil { + return nil, ensureNonRetriable(err) + } + + res, err := req.Next() if err != nil { return nil, err } - req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) - return req.Next() + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + return res, ensureNonRetriable(err) +} + +func ensureNonRetriable(err error) error { + var nre errorinfo.NonRetriable + if err != nil && !errors.As(err, &nre) { + err = btpError{err} + } + return err } + +// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize +type btpError struct { + error +} + +func (btpError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go index 8563375af07..fbcd48311b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -20,6 +20,9 @@ type progress struct { } // NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. 
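As an illustrative usage sketch for the `AuthorizationHandler` hooks added in `policy.go` and `policy_bearer_token.go` above (not taken from the vendored diff; the credential, scope, and challenge handling are placeholders), an SDK client could wire the new callbacks into a `BearerTokenPolicy` roughly like this:

```go
package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newChallengePolicy builds a BearerTokenPolicy whose AuthorizationHandler decides how
// requests are authorized and how 401 challenges are handled. The scope is a placeholder.
func newChallengePolicy(cred azcore.TokenCredential) *runtime.BearerTokenPolicy {
	scope := "https://example.invalid/.default" // placeholder scope
	handler := policy.AuthorizationHandler{
		// OnRequest authorizes every outgoing request with the default scope.
		OnRequest: func(req *policy.Request, authorize func(policy.TokenRequestOptions) error) error {
			return authorize(policy.TokenRequestOptions{Scopes: []string{scope}})
		},
		// OnChallenge runs when the service replies 401; a real handler would parse the
		// WWW-Authenticate header and request a token that satisfies the challenge.
		OnChallenge: func(req *policy.Request, res *http.Response, authorize func(policy.TokenRequestOptions) error) error {
			_ = res.Header.Get("WWW-Authenticate") // challenge parsing omitted in this sketch
			return authorize(policy.TokenRequestOptions{Scopes: []string{scope}})
		},
	}
	return runtime.NewBearerTokenPolicy(cred, []string{scope}, &policy.BearerTokenOptions{
		AuthorizationHandler: handler,
	})
}
```

When `OnRequest` or `OnChallenge` returns an error, the policy wraps it as non-retriable, matching the `ensureNonRetriable` behavior introduced above.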
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { return exported.NopCloser(rs) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 5877e476f6f..6ac3a8e391e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,35 @@ # Release History +## 1.2.1 (2023-01-26) + +### Other Changes +* Upgrade MSAL to v0.8.1 + +## 1.3.0-beta.2 (2023-01-10) + +### Features Added +* Added `OnBehalfOfCredential` to support the on-behalf-of flow + ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642)) + +### Bugs Fixed +* `AzureCLICredential` reports token expiration in local time (should be UTC) + +### Other Changes +* `AzureCLICredential` imposes its default timeout only when the `Context` + passed to `GetToken()` has no deadline +* Added `NewCredentialUnavailableError()`. This function constructs an error indicating + a credential can't authenticate and an encompassing `ChainedTokenCredential` should + try its next credential, if any. + +## 1.3.0-beta.1 (2022-12-13) + +### Features Added +* `WorkloadIdentityCredential` and `DefaultAzureCredential` support + Workload Identity Federation on Kubernetes. `DefaultAzureCredential` + support requires environment variable configuration as set by the + Workload Identity webhook. + ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615)) + ## 1.2.0 (2022-11-08) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 60c3b9a1ec6..98d6219ee7e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -150,16 +150,16 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { // enables fakes for test scenarios type confidentialClient interface { - AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireTokenSilentOption) (confidential.AuthResult, error) - AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireTokenByAuthCodeOption) (confidential.AuthResult, error) - AcquireTokenByCredential(ctx context.Context, scopes []string) (confidential.AuthResult, error) + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) } // enables fakes for test scenarios type publicClient interface { - AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireTokenSilentOption) (public.AuthResult, error) - AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (public.AuthResult, error) - AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (public.DeviceCode, error) - AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireTokenByAuthCodeOption) (public.AuthResult, 
error) - AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.InteractiveAuthOption) (public.AuthResult, error) + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 9032ae9886a..4d2afe4f5c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -73,11 +73,7 @@ func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } - o := []public.InteractiveAuthOption{} - if c.options.RedirectURL != "" { - o = append(o, public.WithRedirectURI(c.options.RedirectURL)) - } - ar, err = c.client.AcquireTokenInteractive(ctx, opts.Scopes, o...) + ar, err = c.client.AcquireTokenInteractive(ctx, opts.Scopes, public.WithRedirectURI(c.options.RedirectURL)) if err != nil { return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameBrowser, err) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 9757589d166..85226d55ae1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -11,5 +11,5 @@ const ( component = "azidentity" // Version is the semantic version (see http://semver.org) of this module. - version = "v1.2.0" + version = "v1.2.1" ) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 11a33de73a1..85a1ba6deae 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -27,6 +27,7 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" ) @@ -283,6 +284,10 @@ type Options struct { // Instructs MSAL Go to use an Azure regional token service with sepcified AzureRegion. 
AzureRegion string + + capabilities []string + + disableInstanceDiscovery bool } func (o Options) validate() error { @@ -314,6 +319,15 @@ func WithAccessor(accessor cache.ExportReplace) Option { } } +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *Options) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + // WithHTTPClient allows for a custom HTTP client to be set. func WithHTTPClient(httpClient ops.HTTPClient) Option { return func(o *Options) { @@ -328,6 +342,13 @@ func WithX5C() Option { } } +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *Options) { + o.disableInstanceDiscovery = !enabled + } +} + // WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. // Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. // See https://aka.ms/region-map for more details on region names. @@ -368,24 +389,20 @@ func New(clientID string, cred Credential, options ...Option) (Client, error) { baseOpts := []base.Option{ base.WithCacheAccessor(opts.Accessor), + base.WithClientCapabilities(opts.capabilities), base.WithRegionDetection(opts.AzureRegion), base.WithX5C(opts.SendX5C), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), } if cred.tokenProvider != nil { // The caller will handle all details of authentication, using Client only as a token cache. - // Declaring the authority host known prevents unnecessary metadata discovery requests. (The - // authority is irrelevant to Client and friends because the token provider is responsible - // for authentication.) - parsed, err := url.Parse(opts.Authority) - if err != nil { - return Client{}, errors.New("invalid authority") - } - baseOpts = append(baseOpts, base.WithKnownAuthorityHosts([]string{parsed.Hostname()})) + baseOpts = append(baseOpts, base.WithInstanceDiscovery(false)) } base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), baseOpts...) if err != nil { return Client{}, err } + base.AuthParams.IsConfidentialClient = true return Client{base: base, cred: internalCred}, nil } @@ -395,9 +412,159 @@ func (cca Client) UserID() string { return cca.userID } +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + // AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. 
-func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) { - return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, cca.base.AuthParams) +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. +func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *AcquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. 
+func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *AcquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } } // AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. @@ -405,35 +572,63 @@ func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, type AcquireTokenSilentOptions struct { // Account represents the account to use. To set, use the WithSilentAccount() option. Account Account + + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() } // AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent(). type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions) +func (AcquireTokenSilentOption) acquireSilentOption() {} + // WithSilentAccount uses the passed account during an AcquireTokenSilent() call. -func WithSilentAccount(account Account) AcquireTokenSilentOption { - return func(a *AcquireTokenSilentOptions) { - a.Account = account +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenSilentOptions: + t.Account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), } } // AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
-func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) { - opts := AcquireTokenSilentOptions{} - for _, o := range options { - o(&opts) +// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := AcquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err } - var isAppCache bool - if opts.Account.IsZero() { - isAppCache = true + + if o.claims != "" { + return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims") } silentParameters := base.AcquireTokenSilentParameters{ Scopes: scopes, - Account: opts.Account, + Account: o.Account, RequestType: accesstokens.ATConfidential, Credential: cca.cred, - IsAppCache: isAppCache, + IsAppCache: o.Account.IsZero(), + TenantID: o.tenantID, } return cca.base.AcquireTokenSilent(ctx, silentParameters) @@ -442,43 +637,93 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, optio // AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. type AcquireTokenByAuthCodeOptions struct { Challenge string + + claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() } // AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode(). type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions) +func (AcquireTokenByAuthCodeOption) acquireByAuthCodeOption() {} + // WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. -func WithChallenge(challenge string) AcquireTokenByAuthCodeOption { - return func(a *AcquireTokenByAuthCodeOptions) { - a.Challenge = challenge +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.Challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), } } // AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. // The specified redirect URI must be the same URI that was used when the authorization code was requested. 
-func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) { - opts := AcquireTokenByAuthCodeOptions{} - for _, o := range options { - o(&opts) +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := AcquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err } params := base.AcquireTokenAuthCodeParameters{ Scopes: scopes, Code: code, - Challenge: opts.Challenge, + Challenge: o.Challenge, + Claims: o.claims, AppType: accesstokens.ATConfidential, Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode RedirectURI: redirectURI, + TenantID: o.tenantID, } return cca.base.AcquireTokenByAuthCode(ctx, params) } +// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential +type acquireTokenByCredentialOptions struct { + claims, tenantID string +} + +// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential +type AcquireByCredentialOption interface { + acquireByCredOption() +} + // AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. -func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string) (AuthResult, error) { - authParams := cca.base.AuthParams +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) { + o := acquireTokenByCredentialOptions{} + err := options.ApplyOptions(&o, opts) + if err != nil { + return AuthResult{}, err + } + authParams, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } authParams.Scopes = scopes authParams.AuthorizationType = authority.ATClientCredentials + authParams.Claims = o.claims token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) if err != nil { @@ -487,13 +732,31 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string) return cca.base.AuthResultFromToken(ctx, authParams, token, true) } +// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf +type acquireTokenOnBehalfOfOptions struct { + claims, tenantID string +} + +// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf +type AcquireOnBehalfOfOption interface { + acquireOBOOption() +} + // AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. // Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. 
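The confidential client methods above now accept per-call options built on the new `CallOption` plumbing. A minimal caller sketch (not part of the diff; the client ID, secret, tenant, and scope are placeholders) combining `WithTenantID` and `WithClaims` could look like this:

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// acquireForTenant shows the new per-request options: WithTenantID overrides the client's
// default tenant for one call, and WithClaims forwards a decoded claims challenge.
func acquireForTenant(ctx context.Context, claimsChallenge string) (confidential.AuthResult, error) {
	cred, err := confidential.NewCredFromSecret("client-secret") // placeholder secret
	if err != nil {
		return confidential.AuthResult{}, err
	}
	client, err := confidential.New("client-id", cred, // placeholder client ID
		confidential.WithClientCapabilities([]string{"CP1"}),
	)
	if err != nil {
		return confidential.AuthResult{}, err
	}
	return client.AcquireTokenByCredential(ctx, []string{"https://graph.microsoft.com/.default"},
		confidential.WithTenantID("contoso-tenant-id"), // placeholder tenant
		confidential.WithClaims(claimsChallenge),
	)
}
```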
-func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string) (AuthResult, error) { +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) { + o := acquireTokenOnBehalfOfOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } params := base.AcquireTokenOnBehalfOfParameters{ Scopes: scopes, UserAssertion: userAssertion, + Claims: o.claims, Credential: cca.cred, + TenantID: o.tenantID, } return cca.base.AcquireTokenOnBehalfOf(ctx, params) } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index a86f06400ba..ed8715ced44 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -55,8 +55,10 @@ type AcquireTokenSilentParameters struct { RequestType accesstokens.AppType Credential *accesstokens.Credential IsAppCache bool + TenantID string UserAssertion string AuthorizationType authority.AuthorizeType + Claims string } // AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. @@ -67,14 +69,18 @@ type AcquireTokenAuthCodeParameters struct { Scopes []string Code string Challenge string + Claims string RedirectURI string AppType accesstokens.AppType Credential *accesstokens.Credential + TenantID string } type AcquireTokenOnBehalfOfParameters struct { Scopes []string + Claims string Credential *accesstokens.Credential + TenantID string UserAssertion string } @@ -136,42 +142,69 @@ type Client struct { } // Option is an optional argument to the New constructor. -type Option func(c *Client) +type Option func(c *Client) error // WithCacheAccessor allows you to set some type of cache for storing authentication tokens. func WithCacheAccessor(ca cache.ExportReplace) Option { - return func(c *Client) { + return func(c *Client) error { if ca != nil { c.cacheAccessor = ca } + return nil + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(c *Client) error { + var err error + if len(capabilities) > 0 { + cc, err := authority.NewClientCapabilities(capabilities) + if err == nil { + c.AuthParams.Capabilities = cc + } + } + return err } } // WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user func WithKnownAuthorityHosts(hosts []string) Option { - return func(c *Client) { + return func(c *Client) error { cp := make([]string, len(hosts)) copy(cp, hosts) c.AuthParams.KnownAuthorityHosts = cp + return nil } } // WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. 
func WithX5C(sendX5C bool) Option { - return func(c *Client) { + return func(c *Client) error { c.AuthParams.SendX5C = sendX5C + return nil } } func WithRegionDetection(region string) Option { - return func(c *Client) { + return func(c *Client) error { c.AuthParams.AuthorityInfo.Region = region + return nil + } +} + +func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled + c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled + return nil } } // New is the constructor for Base. func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { - authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true) + //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false) if err != nil { return Client{}, err } @@ -184,9 +217,11 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O pmanager: storage.NewPartitionedManager(token), } for _, o := range options { - o(&client) + if err = o(&client); err != nil { + break + } } - return client, nil + return client, err } @@ -202,6 +237,11 @@ func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, s return "", err } + claims, err := authParams.MergeCapabilitiesAndClaims() + if err != nil { + return "", err + } + v := url.Values{} v.Add("client_id", clientID) v.Add("response_type", "code") @@ -210,41 +250,49 @@ func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, s if authParams.State != "" { v.Add("state", authParams.State) } + if claims != "" { + v.Add("claims", claims) + } if authParams.CodeChallenge != "" { v.Add("code_challenge", authParams.CodeChallenge) } if authParams.CodeChallengeMethod != "" { v.Add("code_challenge_method", authParams.CodeChallengeMethod) } + if authParams.LoginHint != "" { + v.Add("login_hint", authParams.LoginHint) + } if authParams.Prompt != "" { v.Add("prompt", authParams.Prompt) } + if authParams.DomainHint != "" { + v.Add("domain_hint", authParams.DomainHint) + } // There were left over from an implementation that didn't use any of these. We may // need to add them later, but as of now aren't needed. /* if p.ResponseMode != "" { urlParams.Add("response_mode", p.ResponseMode) } - if p.LoginHint != "" { - urlParams.Add("login_hint", p.LoginHint) - } - if p.DomainHint != "" { - urlParams.Add("domain_hint", p.DomainHint) - } */ baseURL.RawQuery = v.Encode() return baseURL.String(), nil } func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { - authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and authParams is not a pointer. 
+ // when tenant == "", the caller didn't specify a tenant and WithTenant will use the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return AuthResult{}, err + } authParams.Scopes = silent.Scopes authParams.HomeAccountID = silent.Account.HomeAccountID authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims authParams.UserAssertion = silent.UserAssertion var storageTokenResponse storage.TokenResponse - var err error if authParams.AuthorizationType == authority.ATOnBehalfOf { if s, ok := b.pmanager.(cache.Serializer); ok { suggestedCacheKey := authParams.CacheKey(silent.IsAppCache) @@ -268,29 +316,37 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen } } - result, err := AuthResultFromStorage(storageTokenResponse) - if err != nil { - if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { - return AuthResult{}, errors.New("no token found") - } - - var cc *accesstokens.Credential - if silent.RequestType == accesstokens.ATConfidential { - cc = silent.Credential + // ignore cached access tokens when given claims + if silent.Claims == "" { + result, err := AuthResultFromStorage(storageTokenResponse) + if err == nil { + return result, nil } + } - token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) - if err != nil { - return AuthResult{}, err - } + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return AuthResult{}, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } - return b.AuthResultFromToken(ctx, authParams, token, true) + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return AuthResult{}, err } - return result, nil + + return b.AuthResultFromToken(ctx, authParams, token, true) } func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { - authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Claims = authCodeParams.Claims authParams.Scopes = authCodeParams.Scopes authParams.Redirecturi = authCodeParams.RedirectURI authParams.AuthorizationType = authority.ATAuthCode @@ -316,28 +372,33 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui // AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { - authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. 
- authParams.Scopes = onBehalfOfParams.Scopes - authParams.AuthorizationType = authority.ATOnBehalfOf - authParams.UserAssertion = onBehalfOfParams.UserAssertion - + var ar AuthResult silentParameters := AcquireTokenSilentParameters{ Scopes: onBehalfOfParams.Scopes, RequestType: accesstokens.ATConfidential, Credential: onBehalfOfParams.Credential, UserAssertion: onBehalfOfParams.UserAssertion, AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, } - token, err := b.AcquireTokenSilent(ctx, silentParameters) + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) if err != nil { - fmt.Println("Acquire Token Silent failed ") - token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) - if err != nil { - return AuthResult{}, err - } - return b.AuthResultFromToken(ctx, authParams, token, true) + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) } - return token, err + return ar, err } func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go index d17e7c034a4..87d7d797b3e 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -37,48 +37,49 @@ func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { // Read reads a storage token from the cache if it exists. func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes - metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) - if err != nil { - return TokenResponse{}, err + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases } + userAssertionHash := authParameters.AssertionHash() partitionKeyFromRequest := userAssertionHash - accessToken, err := m.readAccessToken(metadata.Aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) - if err != nil { - return TokenResponse{}, err - } - - AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) - if err != nil { - return TokenResponse{}, err + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. 
We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err == nil { + tr.AccessToken = accessToken } - familyID := AppMetaData.FamilyID - - refreshToken, err := m.readRefreshToken(metadata.Aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) - if err != nil { - return TokenResponse{}, err + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken } - idToken, err := m.readIDToken(metadata.Aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) - if err != nil { - return TokenResponse{}, err + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } } - account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) - if err != nil { - return TokenResponse{}, err + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account } - return TokenResponse{ - AccessToken: accessToken, - RefreshToken: refreshToken, - IDToken: idToken, - Account: account, - }, nil + return tr, nil } // Write writes a token response to the cache and returns the account information the token is stored with. @@ -143,13 +144,18 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes localAccountID := idTokenJwt.LocalAccountID() authorityType := authParameters.AuthorityInfo.AuthorityType + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + account = shared.NewAccount( homeAccountID, environment, realm, localAccountID, authorityType, - idTokenJwt.PreferredUsername, + preferredUsername, ) if authParameters.AuthorizationType == authority.ATOnBehalfOf { account.UserAssertionHash = userAssertionHash diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index b759408b5b4..1c0471bb980 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -84,14 +84,15 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { // Read reads a storage token from the cache if it exists. 
func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { + tr := TokenResponse{} homeAccountID := authParameters.HomeAccountID realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes - // fetch metadata if and only if the authority isn't explicitly trusted - aliases := authParameters.KnownAuthorityHosts - if len(aliases) == 0 { + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) if err != nil { return TokenResponse{}, err @@ -100,40 +101,32 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, } accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + tr.AccessToken = accessToken if account.IsZero() { - return TokenResponse{ - AccessToken: accessToken, - RefreshToken: accesstokens.RefreshToken{}, - IDToken: IDToken{}, - Account: shared.Account{}, - }, nil + return tr, nil } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) - if err != nil { - return TokenResponse{}, err + if err == nil { + tr.IDToken = idToken } - AppMetaData, err := m.readAppMetaData(aliases, clientID) - if err != nil { - return TokenResponse{}, err + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } } - familyID := AppMetaData.FamilyID - refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) - if err != nil { - return TokenResponse{}, err - } account, err = m.readAccount(homeAccountID, aliases, realm) - if err != nil { - return TokenResponse{}, err + if err == nil { + tr.Account = account } - return TokenResponse{ - AccessToken: accessToken, - RefreshToken: refreshToken, - IDToken: idToken, - Account: account, - }, nil + return tr, nil } const scopeSeparator = " " @@ -146,7 +139,6 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) - cachedAt := time.Now() var account shared.Account @@ -189,13 +181,18 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces localAccountID := idTokenJwt.LocalAccountID() authorityType := authParameters.AuthorityInfo.AuthorityType + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + account = shared.NewAccount( homeAccountID, environment, realm, localAccountID, authorityType, - idTokenJwt.PreferredUsername, + preferredUsername, ) if err := m.writeAccount(account); err != nil { return shared.Account{}, err diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index 6b4016c1166..f910823544b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -100,8 +100,10 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams scopes := make([]string, len(authParams.Scopes)) copy(scopes, authParams.Scopes) params := exported.TokenProviderParameters{ + Claims: authParams.Claims, CorrelationID: uuid.New().String(), Scopes: scopes, + TenantID: authParams.AuthorityInfo.Tenant, } tr, err := cred.TokenProvider(ctx, params) if err != nil { @@ -138,7 +140,6 @@ func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams if cred.Secret != "" { return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) - } jwt, err := cred.JWT(ctx, authParams) if err != nil { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index eaeb2ef5f08..fa6bb61c8ef 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -157,6 +157,9 @@ type Client struct { // FromUsernamePassword uses a username and password to get an access token. func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.Password) qv.Set(username, authParameters.Username) qv.Set(password, authParameters.Password) @@ -219,6 +222,9 @@ func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenRes qv.Set(clientID, req.AuthParams.ClientID) qv.Set(clientInfo, clientInfoVal) addScopeQueryParam(qv, req.AuthParams) + if err := addClaims(qv, req.AuthParams); err != nil { + return TokenResponse{}, err + } return c.doTokenResp(ctx, req.AuthParams, qv) } @@ -233,6 +239,9 @@ func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParam return TokenResponse{}, err } } + if err := addClaims(qv, authParams); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.RefreshToken) qv.Set(clientID, authParams.ClientID) qv.Set(clientInfo, clientInfoVal) @@ -245,6 +254,9 @@ func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParam // FromClientSecret uses a client's secret (aka password) to get a new token. 
func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.ClientCredential) qv.Set("client_secret", clientSecret) qv.Set(clientID, authParameters.ClientID) @@ -259,6 +271,9 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.ClientCredential) qv.Set("client_assertion_type", grant.ClientAssertion) qv.Set("client_assertion", assertion) @@ -275,6 +290,9 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.JWT) qv.Set(clientID, authParameters.ClientID) qv.Set("client_secret", clientSecret) @@ -288,6 +306,9 @@ func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameter func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.JWT) qv.Set("client_assertion_type", grant.ClientAssertion) qv.Set("client_assertion", assertion) @@ -302,6 +323,9 @@ func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authPara func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } qv.Set(clientID, authParameters.ClientID) addScopeQueryParam(qv, authParameters) @@ -318,6 +342,9 @@ func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.A func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(grantType, grant.DeviceCode) qv.Set(deviceCode, deviceCodeResult.DeviceCode) qv.Set(clientID, authParameters.ClientID) @@ -329,6 +356,9 @@ func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authori func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } qv.Set(username, authParameters.Username) qv.Set(password, authParameters.Password) qv.Set(clientID, authParameters.ClientID) @@ -406,6 +436,15 @@ func AppendDefaultScopes(authParameters authority.AuthParams) []string { return scopes } +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil 
&& claims != "" { + v.Set("claims", claims) + } + return err +} + func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { scopes := AppendDefaultScopes(authParameters) queryParams.Set("scope", strings.Join(scopes, " ")) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go index cc847001979..b3892bf3f32 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -148,10 +148,13 @@ func (c *ClientInfo) UnmarshalJSON(b []byte) error { // HomeAccountID creates the home account ID. func (c ClientInfo) HomeAccountID() string { - if c.UID == "" || c.UTID == "" { + if c.UID == "" { return "" + } else if c.UTID == "" { + return fmt.Sprintf("%s.%s", c.UID, c.UID) + } else { + return fmt.Sprintf("%s.%s", c.UID, c.UTID) } - return fmt.Sprintf("%s.%s", c.UID, c.UTID) } // Scopes represents scopes in a TokenResponse. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 4724d944ff8..de5f053f7d1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -7,12 +7,14 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "os" + "path" "strings" "time" @@ -156,9 +158,17 @@ type AuthParams struct { SendX5C bool // UserAssertion is the access token used to acquire token on behalf of user UserAssertion string - + // Capabilities the client will include with each token request, for example "CP1". + // Call [NewClientCapabilities] to construct a value for this field. + Capabilities ClientCapabilities + // Claims required for an access token to satisfy a conditional access policy + Claims string // KnownAuthorityHosts don't require metadata discovery because they're known to the user KnownAuthorityHosts []string + // LoginHint is a username with which to pre-populate account selection during interactive auth + LoginHint string + // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page + DomainHint string } // NewAuthParams creates an authorization parameters object. @@ -170,15 +180,127 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { } } +// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given +// ID is empty, the copy is identical to the original. 
This function returns an error in +// several cases: +// - ID isn't specific (for example, it's "common") +// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority) +// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint +// - the resulting authority URL is invalid +func (p AuthParams) WithTenant(ID string) (AuthParams, error) { + switch ID { + case "", p.AuthorityInfo.Tenant: + // keep the default tenant because the caller didn't override it + return p, nil + case "common", "consumers", "organizations": + if p.AuthorityInfo.AuthorityType == AAD { + return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) + } + // else we'll return a better error below + } + if p.AuthorityInfo.AuthorityType != AAD { + return p, errors.New("the authority doesn't support tenants") + } + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) + if err == nil { + info.Region = p.AuthorityInfo.Region + p.AuthorityInfo = info + } + return p, err +} + +// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter. +func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) { + claims := p.Claims + if len(p.Capabilities.asMap) > 0 { + if claims == "" { + // without claims the result is simply the capabilities + return p.Capabilities.asJSON, nil + } + // Otherwise, merge claims and capabilties into a single JSON object. + // We handle the claims challenge as a map because we don't know its structure. + var challenge map[string]any + if err := json.Unmarshal([]byte(claims), &challenge); err != nil { + return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err) + } + if err := merge(p.Capabilities.asMap, challenge); err != nil { + return "", err + } + b, err := json.Marshal(challenge) + if err != nil { + return "", err + } + claims = string(b) + } + return claims, nil +} + +// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value. +func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. 
+// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + // Info consists of information about the authority. type Info struct { - Host string - CanonicalAuthorityURI string - AuthorityType string - UserRealmURIPrefix string - ValidateAuthority bool - Tenant string - Region string + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool } func firstPathSegment(u *url.URL) (string, error) { @@ -191,7 +313,7 @@ func firstPathSegment(u *url.URL) (string, error) { } // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. -func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool) (Info, error) { +func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { authorityURI = strings.ToLower(authorityURI) var authorityType string u, err := url.Parse(authorityURI) @@ -213,13 +335,15 @@ func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool) (Info, return Info{}, err } + // u.Host includes the port, if any, which is required for private cloud deployments return Info{ - Host: u.Hostname(), - CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Hostname(), tenant), - AuthorityType: authorityType, - UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), - ValidateAuthority: validateAuthority, - Tenant: tenant, + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, }, nil } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go index 893ef4814f7..0ade411797a 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -46,9 +46,6 @@ func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { // ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance func (m *authorityEndpoint) ResolveEndpoints(ctx 
context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { - if authorityInfo.AuthorityType == ADFS && len(userPrincipalName) == 0 { - return authority.Endpoints{}, errors.New("UPN required for authority validation for ADFS") - } if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { return endpoints, nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 00000000000..4561d72db4d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. + return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index 5e1ea912912..c3651e630be 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. 
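For orientation, the new internal `options` package above introduces the call-option pattern that the reworked `public` API below relies on. `apps/internal/options` is not importable by application code, so the following is only a stand-alone sketch of the same mechanism; `fetchOptions`, `WithTenant`, and `fetch` are hypothetical names invented for illustration.

```go
package example

import (
	"errors"
	"fmt"
)

// CallOption mirrors the vendored interface: an option that can mutate any method's options struct.
type CallOption interface {
	Do(any) error
	callOption()
}

type callOption func(any) error

func (c callOption) Do(a any) error { return c(a) }
func (callOption) callOption()      {}

// NewCallOption adapts a function into a CallOption, guarding against a nil implementation.
func NewCallOption(f func(any) error) CallOption {
	if f == nil {
		return callOption(func(any) error { return errors.New("invalid option: missing implementation") })
	}
	return callOption(f)
}

// ApplyOptions asserts each option to CallOption and lets it mutate the options struct.
func ApplyOptions[O, C any](options O, callOptions []C) error {
	for _, o := range callOptions {
		t, ok := any(o).(CallOption)
		if !ok {
			return fmt.Errorf("unexpected option type %T", o)
		}
		if err := t.Do(options); err != nil {
			return err
		}
	}
	return nil
}

// fetchOptions and WithTenant are hypothetical names used only to show the mechanism.
type fetchOptions struct{ tenant string }

func WithTenant(tenant string) CallOption {
	return NewCallOption(func(a any) error {
		o, ok := a.(*fetchOptions)
		if !ok {
			return fmt.Errorf("unexpected options type %T", a)
		}
		o.tenant = tenant
		return nil
	})
}

func fetch(opts ...CallOption) error {
	o := fetchOptions{}
	return ApplyOptions(&o, opts)
}
```

Because a single option value can embed several per-method marker interfaces alongside `CallOption`, one value such as `WithClaims` or `WithTenantID` in the `public` package below is accepted by every acquisition method.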
-const Version = "0.7.0" +const Version = "0.8.1" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 19118c25a2c..0a3ffafff4a 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -35,6 +35,7 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" "github.com/google/uuid" "github.com/pkg/browser" @@ -59,6 +60,10 @@ type Options struct { // The HTTP client used for making requests. // It defaults to a shared http.Client. HTTPClient ops.HTTPClient + + capabilities []string + + disableInstanceDiscovery bool } func (p *Options) validate() error { @@ -89,6 +94,15 @@ func WithCache(accessor cache.ExportReplace) Option { } } +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *Options) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + // WithHTTPClient allows for a custom HTTP client to be set. func WithHTTPClient(httpClient ops.HTTPClient) Option { return func(o *Options) { @@ -96,6 +110,13 @@ func WithHTTPClient(httpClient ops.HTTPClient) Option { } } +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *Options) { + o.disableInstanceDiscovery = !enabled + } +} + // Client is a representation of authentication client for public applications as defined in the // package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. type Client struct { @@ -116,16 +137,128 @@ func New(clientID string, options ...Option) (Client, error) { return Client{}, err } - base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithCacheAccessor(opts.Accessor)) + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithCacheAccessor(opts.Accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) if err != nil { return Client{}, err } return Client{base}, nil } +// createAuthCodeURLOptions contains options for CreateAuthCodeURL +type createAuthCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// CreateAuthCodeURLOption is implemented by options for CreateAuthCodeURL +type CreateAuthCodeURLOption interface { + createAuthCodeURLOption() +} + // CreateAuthCodeURL creates a URL used to acquire an authorization code. 
-func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) { - return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, pca.base.AuthParams) +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...CreateAuthCodeURLOption) (string, error) { + o := createAuthCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. +func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + CreateAuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + CreateAuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *AcquireTokenSilentOptions: + t.claims = claims + case *createAuthCodeURLOptions: + t.claims = claims + case *InteractiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + CreateAuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + CreateAuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *AcquireTokenSilentOptions: + t.tenantID = tenantID + case *createAuthCodeURLOptions: + t.tenantID = tenantID + case *InteractiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } } // AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. @@ -133,41 +266,90 @@ func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI s type AcquireTokenSilentOptions struct { // Account represents the account to use. 
To set, use the WithSilentAccount() option. Account Account + + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() } // AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent(). type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions) +func (AcquireTokenSilentOption) acquireSilentOption() {} + // WithSilentAccount uses the passed account during an AcquireTokenSilent() call. -func WithSilentAccount(account Account) AcquireTokenSilentOption { - return func(a *AcquireTokenSilentOptions) { - a.Account = account +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenSilentOptions: + t.Account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), } } // AcquireTokenSilent acquires a token from either the cache or using a refresh token. -func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) { - opts := AcquireTokenSilentOptions{} - for _, o := range options { - o(&opts) +// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := AcquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err } silentParameters := base.AcquireTokenSilentParameters{ Scopes: scopes, - Account: opts.Account, + Account: o.Account, + Claims: o.claims, RequestType: accesstokens.ATPublic, IsAppCache: false, + TenantID: o.tenantID, } return pca.base.AcquireTokenSilent(ctx, silentParameters) } +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + // AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. // NOTE: this flow is NOT recommended. 
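The silent flow now takes the same option style. A minimal sketch, with illustrative scope and tenant values:

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// silentToken tries the cache (and refresh token) for an account obtained from an
// earlier interactive or device-code sign-in, overriding the tenant for this call only.
func silentToken(ctx context.Context, client public.Client, account public.Account) (public.AuthResult, error) {
	return client.AcquireTokenSilent(ctx,
		[]string{"https://graph.microsoft.com/.default"},
		public.WithSilentAccount(account),
		public.WithTenantID("contoso.onmicrosoft.com"),
	)
}
```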
-func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (AuthResult, error) { - authParams := pca.base.AuthParams +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) { + o := acquireTokenByUsernamePasswordOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } authParams.Scopes = scopes authParams.AuthorizationType = authority.ATUsernamePassword + authParams.Claims = o.claims authParams.Username = username authParams.Password = password @@ -203,12 +385,32 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) } +// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode +type acquireTokenByDeviceCodeOptions struct { + claims, tenantID string +} + +// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode +type AcquireByDeviceCodeOption interface { + acquireByDeviceCodeOptions() +} + // AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token. // Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in. -func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (DeviceCode, error) { - authParams := pca.base.AuthParams +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } authParams.Scopes = scopes authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims dc, err := pca.base.Token.DeviceCode(ctx, authParams) if err != nil { @@ -221,32 +423,61 @@ func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string) // AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. type AcquireTokenByAuthCodeOptions struct { Challenge string + + claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() } // AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode(). type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions) +func (AcquireTokenByAuthCodeOption) acquireByAuthCodeOption() {} + // WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. 
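For the device-code flow the new options slot in the same way. In this sketch the tenant and scope are placeholders, and `dc.Result.Message` is the user-facing instruction string exposed by the pre-existing `DeviceCode` API.

```go
package example

import (
	"context"
	"fmt"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// deviceCodeFlow requests a device code for a specific tenant, shows the user the
// verification message, then blocks until sign-in completes or ctx is done.
func deviceCodeFlow(ctx context.Context, client public.Client) (public.AuthResult, error) {
	dc, err := client.AcquireTokenByDeviceCode(ctx,
		[]string{"user.read"},
		public.WithTenantID("contoso.onmicrosoft.com"),
	)
	if err != nil {
		return public.AuthResult{}, err
	}
	fmt.Println(dc.Result.Message) // e.g. "To sign in, use a web browser to open ..."
	return dc.AuthenticationResult(ctx)
}
```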
-func WithChallenge(challenge string) AcquireTokenByAuthCodeOption { - return func(a *AcquireTokenByAuthCodeOptions) { - a.Challenge = challenge +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *AcquireTokenByAuthCodeOptions: + t.Challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), } } // AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. // The specified redirect URI must be the same URI that was used when the authorization code was requested. -func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) { - opts := AcquireTokenByAuthCodeOptions{} - for _, o := range options { - o(&opts) +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := AcquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err } params := base.AcquireTokenAuthCodeParameters{ Scopes: scopes, Code: code, - Challenge: opts.Challenge, + Challenge: o.Challenge, + Claims: o.claims, AppType: accesstokens.ATPublic, RedirectURI: redirectURI, + TenantID: o.tenantID, } return pca.base.AcquireTokenByAuthCode(ctx, params) @@ -269,24 +500,105 @@ type InteractiveAuthOptions struct { // Used to specify a custom port for the local server. http://localhost:portnumber // All other URI components are ignored. RedirectURI string + + claims, loginHint, tenantID, domainHint string +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() } // InteractiveAuthOption changes options inside InteractiveAuthOptions used in .AcquireTokenInteractive(). type InteractiveAuthOption func(*InteractiveAuthOptions) +func (InteractiveAuthOption) acquireInteractiveOption() {} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AcquireInteractiveOption + CreateAuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + CreateAuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *createAuthCodeURLOptions: + t.loginHint = username + case *InteractiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. 
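Redeeming the authorization code keeps its PKCE parameter but now passes it through the interface-based `WithChallenge`, and can take the shared claims/tenant options as well. The code, verifier, and redirect URI below are placeholders produced earlier in the flow.

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// redeemAuthCode exchanges an authorization code for tokens, supplying the PKCE value
// generated when the auth URL was created and the same per-call tenant override.
func redeemAuthCode(ctx context.Context, client public.Client, code, verifier string) (public.AuthResult, error) {
	return client.AcquireTokenByAuthCode(ctx, code, "http://localhost:8080/redirect",
		[]string{"user.read"},
		public.WithChallenge(verifier),
		public.WithTenantID("contoso.onmicrosoft.com"),
	)
}
```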
+func WithDomainHint(domain string) interface { + AcquireInteractiveOption + CreateAuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + CreateAuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *createAuthCodeURLOptions: + t.domainHint = domain + case *InteractiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // WithRedirectURI uses the specified redirect URI for interactive auth. -func WithRedirectURI(redirectURI string) InteractiveAuthOption { - return func(o *InteractiveAuthOptions) { - o.RedirectURI = redirectURI +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *InteractiveAuthOptions: + t.RedirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), } } // AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. // https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication -func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, options ...InteractiveAuthOption) (AuthResult, error) { - opts := InteractiveAuthOptions{} - for _, opt := range options { - opt(&opts) +// +// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := InteractiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err } // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 @@ -295,17 +607,23 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, return AuthResult{}, err } var redirectURL *url.URL - if opts.RedirectURI != "" { - redirectURL, err = url.Parse(opts.RedirectURI) + if o.RedirectURI != "" { + redirectURL, err = url.Parse(o.RedirectURI) if err != nil { return AuthResult{}, err } } - authParams := pca.base.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. 
+ authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } authParams.Scopes = scopes authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims authParams.CodeChallenge = challenge authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint authParams.State = uuid.New().String() authParams.Prompt = "select_account" res, err := pca.browserLogin(ctx, redirectURL, authParams) diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml new file mode 100644 index 00000000000..47a6a46ec2a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.7 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE new file mode 100644 index 00000000000..89b81799655 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md new file mode 100644 index 00000000000..3673df487f9 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/README.md @@ -0,0 +1,33 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. 
+The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end. + +godoc.org does not support modules yet, +so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go new file mode 100644 index 00000000000..3676ee405d8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
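Turning to the vendored backoff package: as a concrete version of the `BackOff` interface's doc comment above, here is a small sketch of driving a policy by hand. Most callers will prefer `backoff.Retry`, shown further below.

```go
package example

import (
	"time"

	"github.com/cenkalti/backoff/v3"
)

// waitLoop drives a BackOff policy by hand: sleep for each returned duration
// and give up once the policy returns Stop.
func waitLoop(b backoff.BackOff, do func() error) error {
	b.Reset()
	for {
		err := do()
		if err == nil {
			return nil
		}
		next := b.NextBackOff()
		if next == backoff.Stop {
			return err
		}
		time.Sleep(next)
	}
}
```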
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go new file mode 100644 index 00000000000..fcff86c1b3d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/context.go @@ -0,0 +1,66 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v3/exponential.go new file mode 100644 index 00000000000..cb11cc1d21e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/exponential.go @@ -0,0 +1,154 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). 
+ +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. 
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go new file mode 100644 index 00000000000..6c776ccf8ed --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/retry.go @@ -0,0 +1,96 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + var err error + var next time.Duration + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// Permanent wraps the given err in a *PermanentError. 
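Putting the pieces together, a sketch of typical v3 usage: an exponential policy bounded by a caller context, with `Permanent` marking errors that should never be retried. The sentinel error and the 30-second bound are illustrative assumptions, not values from this diff.

```go
package example

import (
	"context"
	"errors"
	"time"

	"github.com/cenkalti/backoff/v3"
)

// errFatal is a placeholder sentinel for failures that should never be retried.
var errFatal = errors.New("fatal")

// callWithRetry retries "do" with exponential backoff until it succeeds, MaxElapsedTime
// passes, the context is done, or the error is marked permanent.
func callWithRetry(ctx context.Context, do func() error) error {
	policy := backoff.NewExponentialBackOff()
	policy.MaxElapsedTime = 30 * time.Second // illustrative bound

	return backoff.Retry(func() error {
		err := do()
		if errors.Is(err, errFatal) {
			return backoff.Permanent(err) // Retry returns the wrapped error immediately
		}
		return err
	}, backoff.WithContext(policy, ctx))
}
```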
+func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go new file mode 100644 index 00000000000..ed699e0e300 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/ticker.go @@ -0,0 +1,94 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v3/timer.go b/vendor/github.com/cenkalti/backoff/v3/timer.go new file mode 100644 index 00000000000..8120d0213c5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
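The channel-based `Ticker` defined above suits callers that want to drive retries from a receive loop rather than block inside `Retry`. A minimal sketch:

```go
package example

import (
	"log"

	"github.com/cenkalti/backoff/v3"
)

// tickerRetry drives retries from the Ticker's channel: each tick triggers one
// attempt, and the channel closes once the policy returns Stop.
func tickerRetry(b backoff.BackOff, do func() error) error {
	ticker := backoff.NewTicker(b)
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = do(); err == nil {
			return nil
		}
		log.Printf("attempt failed, retrying: %v", err)
	}
	// The channel closed, so the policy gave up; return the last error.
	return err
}
```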
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go new file mode 100644 index 00000000000..cfeefd9b764 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml index 9d00fdd5d66..77a6bccf77e 100644 --- a/vendor/github.com/go-errors/errors/.travis.yml +++ b/vendor/github.com/go-errors/errors/.travis.yml @@ -3,3 +3,6 @@ language: go go: - "1.8.x" - "1.10.x" + - "1.13.x" + - "1.14.x" + - "1.16.x" diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md index 5d4f1873ddf..2ee13f11747 100644 --- a/vendor/github.com/go-errors/errors/README.md +++ b/vendor/github.com/go-errors/errors/README.md @@ -64,3 +64,18 @@ packages by Facebook and Dropbox, it was moved to one canonical location so everyone can benefit. This package is licensed under the MIT license, see LICENSE.MIT for details. + + +## Changelog +* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is +* v1.2.0 added `errors.As` from the standard library. +* v1.3.0 *BREAKING* updated error methods to return `error` instead of `*Error`. +> Code that needs access to the underlying `*Error` can use the new errors.AsError(e) +> ``` +> // before +> errors.New(err).ErrorStack() +> // after +>. errors.AsError(errors.Wrap(err)).ErrorStack() +> ``` +* v1.4.0 *BREAKING* v1.4.0 reverted all changes from v1.3.0 and is identical to v1.2.0 +* v1.4.1 no code change, but now without an unnecessary cover.out file. 
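For the go-errors bump, a short sketch of the API surface this update touches: `Wrap` now tolerates nil input, and on Go 1.13+ `Is`/`As` defer to the standard library, so a wrapped error still matches sentinels such as `os.ErrNotExist`. The file name is a placeholder.

```go
package example

import (
	"fmt"
	"os"

	"github.com/go-errors/errors"
)

// readConfig wraps any failure so the stack trace of the failing call is preserved.
func readConfig() ([]byte, error) {
	b, err := os.ReadFile("config.yaml") // placeholder path
	if err != nil {
		return nil, errors.Wrap(err, 0)
	}
	return b, nil
}

func report() {
	if _, err := readConfig(); err != nil {
		if errors.Is(err, os.ErrNotExist) { // delegates to the standard library on Go 1.13+
			fmt.Println("config file missing")
		}
		var wrapped *errors.Error
		if errors.As(err, &wrapped) {
			fmt.Print(wrapped.ErrorStack()) // stack captured at the Wrap call site
		}
	}
}
```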
diff --git a/vendor/github.com/go-errors/errors/cover.out b/vendor/github.com/go-errors/errors/cover.out deleted file mode 100644 index ab18b0519fc..00000000000 --- a/vendor/github.com/go-errors/errors/cover.out +++ /dev/null @@ -1,89 +0,0 @@ -mode: set -github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1 -github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1 -github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0 -github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1 -github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1 -github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1 -github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1 -github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1 -github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0 -github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1 -github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1 -github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1 -github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0 -github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0 -github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1 -github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1 -github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1 -github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1 -github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1 -github.com/go-errors/errors/error.go:70.32,73.23 2 1 -github.com/go-errors/errors/error.go:80.2,85.3 3 1 -github.com/go-errors/errors/error.go:74.2,75.10 1 1 -github.com/go-errors/errors/error.go:76.2,77.28 1 1 -github.com/go-errors/errors/error.go:92.43,95.23 2 1 -github.com/go-errors/errors/error.go:104.2,109.3 3 1 -github.com/go-errors/errors/error.go:96.2,97.11 1 1 -github.com/go-errors/errors/error.go:98.2,99.10 1 1 -github.com/go-errors/errors/error.go:100.2,101.28 1 1 -github.com/go-errors/errors/error.go:115.39,117.19 1 1 -github.com/go-errors/errors/error.go:121.2,121.29 1 1 -github.com/go-errors/errors/error.go:125.2,125.43 1 1 -github.com/go-errors/errors/error.go:129.2,129.14 1 1 -github.com/go-errors/errors/error.go:117.19,119.3 1 1 -github.com/go-errors/errors/error.go:121.29,123.3 1 1 -github.com/go-errors/errors/error.go:125.43,127.3 1 1 -github.com/go-errors/errors/error.go:135.53,137.2 1 1 -github.com/go-errors/errors/error.go:140.34,142.2 1 1 -github.com/go-errors/errors/error.go:146.34,149.42 2 1 -github.com/go-errors/errors/error.go:153.2,153.20 1 1 -github.com/go-errors/errors/error.go:149.42,151.3 1 1 -github.com/go-errors/errors/error.go:158.39,160.2 1 1 -github.com/go-errors/errors/error.go:164.46,165.23 1 1 -github.com/go-errors/errors/error.go:173.2,173.19 1 1 -github.com/go-errors/errors/error.go:165.23,168.32 2 1 -github.com/go-errors/errors/error.go:168.32,170.4 1 1 -github.com/go-errors/errors/error.go:177.37,178.42 1 1 -github.com/go-errors/errors/error.go:181.2,181.41 1 1 -github.com/go-errors/errors/error.go:178.42,180.3 1 1 -github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1 -github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1 -github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1 -github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0 -github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1 -github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1 -github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1 -github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0 -github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1 -github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1 
-github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1 -github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1 -github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1 -github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1 -github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1 -github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1 -github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1 -github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1 -github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1 -github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0 -github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0 -github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1 -github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1 -github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1 -github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1 -github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1 -github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1 -github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1 -github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1 -github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1 -github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1 -github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1 -github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0 -github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1 -github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1 -github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1 -github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0 -github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0 -github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1 -github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0 diff --git a/vendor/github.com/go-errors/errors/error.go b/vendor/github.com/go-errors/errors/error.go index 60062a4372b..ccbc2e4272d 100644 --- a/vendor/github.com/go-errors/errors/error.go +++ b/vendor/github.com/go-errors/errors/error.go @@ -91,6 +91,10 @@ func New(e interface{}) *Error { // fmt.Errorf("%v"). The skip parameter indicates how far up the stack // to start the stacktrace. 0 is from the current call, 1 from its caller, etc. func Wrap(e interface{}, skip int) *Error { + if e == nil { + return nil + } + var err error switch e := e.(type) { @@ -117,6 +121,9 @@ func Wrap(e interface{}, skip int) *Error { // up the stack to start the stacktrace. 0 is from the current call, // 1 from its caller, etc. func WrapPrefix(e interface{}, prefix string, skip int) *Error { + if e == nil { + return nil + } err := Wrap(e, 1+skip) @@ -132,26 +139,6 @@ func WrapPrefix(e interface{}, prefix string, skip int) *Error { } -// Is detects whether the error is equal to a given error. Errors -// are considered equal by this function if they are the same object, -// or if they both contain the same error inside an errors.Error. -func Is(e error, original error) bool { - - if e == original { - return true - } - - if e, ok := e.(*Error); ok { - return Is(e.Err, original) - } - - if original, ok := original.(*Error); ok { - return Is(e, original.Err) - } - - return false -} - // Errorf creates a new error with the given message. You can use it // as a drop-in replacement for fmt.Errorf() to provide descriptive // errors in return values. @@ -215,3 +202,8 @@ func (err *Error) TypeName() string { } return reflect.TypeOf(err.Err).String() } + +// Return the wrapped error (implements api for As function). 
+func (err *Error) Unwrap() error { + return err.Err +} diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go new file mode 100644 index 00000000000..0af2fc80659 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_1_13.go @@ -0,0 +1,31 @@ +// +build go1.13 + +package errors + +import ( + baseErrors "errors" +) + +// find error in any wrapped error +func As(err error, target interface{}) bool { + return baseErrors.As(err, target) +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are matched by errors.Is +// or if their contained errors are matched through errors.Is +func Is(e error, original error) bool { + if baseErrors.Is(e, original) { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go new file mode 100644 index 00000000000..80b0695e7e8 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_backward.go @@ -0,0 +1,57 @@ +// +build !go1.13 + +package errors + +import ( + "reflect" +) + +type unwrapper interface { + Unwrap() error +} + +// As assigns error or any wrapped error to the value target points +// to. If there is no value of the target type of target As returns +// false. +func As(err error, target interface{}) bool { + targetType := reflect.TypeOf(target) + + for { + errType := reflect.TypeOf(err) + + if errType == nil { + return false + } + + if reflect.PtrTo(errType) == targetType { + reflect.ValueOf(target).Elem().Set(reflect.ValueOf(err)) + return true + } + + wrapped, ok := err.(unwrapper) + if ok { + err = wrapped.Unwrap() + } else { + return false + } + } +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are the same object, +// or if they both contain the same error inside an errors.Error. +func Is(e error, original error) bool { + if e == original { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/stackframe.go b/vendor/github.com/go-errors/errors/stackframe.go index 750ab9a5215..f420849d25a 100644 --- a/vendor/github.com/go-errors/errors/stackframe.go +++ b/vendor/github.com/go-errors/errors/stackframe.go @@ -1,9 +1,10 @@ package errors import ( + "bufio" "bytes" "fmt" - "io/ioutil" + "os" "runtime" "strings" ) @@ -62,18 +63,29 @@ func (frame *StackFrame) String() string { // SourceLine gets the line of code (from File and Line) of the original source if possible. 
func (frame *StackFrame) SourceLine() (string, error) { - data, err := ioutil.ReadFile(frame.File) + if frame.LineNumber <= 0 { + return "???", nil + } + file, err := os.Open(frame.File) if err != nil { return "", New(err) } - - lines := bytes.Split(data, []byte{'\n'}) - if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { - return "???", nil + defer file.Close() + + scanner := bufio.NewScanner(file) + currentLine := 1 + for scanner.Scan() { + if currentLine == frame.LineNumber { + return string(bytes.Trim(scanner.Bytes(), " \t")), nil + } + currentLine++ } - // -1 because line-numbers are 1 based, but our array is 0 based - return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil + if err := scanner.Err(); err != nil { + return "", New(err) + } + + return "???", nil } func packageAndName(fn *runtime.Func) (string, string) { diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE index ccae99f6a9a..95a0f0541cd 100644 --- a/vendor/github.com/hashicorp/go-msgpack/LICENSE +++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -1,25 +1,22 @@ -Copyright (c) 2012, 2013 Ugorji Nwoke. +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the author nor the names of its contributors may be used - to endorse or promote products derived from this software - without specific prior written permission. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go deleted file mode 100644 index c14d810a73e..00000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. 
Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Representative Benchmark Results - -Run the benchmark suite using: - go test -bi -bench=. -benchmem - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext_dep_test.go - -*/ -package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md deleted file mode 100644 index 6c95d1bfd20..00000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Codec - -High Performance and Feature-Rich Idiomatic Go Library providing -encode/decode support for different serialization formats. - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -Online documentation: [http://godoc.org/github.com/ugorji/go/codec] - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. 
- - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Representative Benchmark Results - -A sample run of benchmark using "go test -bi -bench=. -benchmem": - - /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) - - .............................................. 
- BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT - To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." - Benchmark: - Struct recursive Depth: 1 - ApproxDeepSize Of benchmark Struct: 4694 bytes - Benchmark One-Pass Run: - v-msgpack: len: 1600 bytes - bson: len: 3025 bytes - msgpack: len: 1560 bytes - binc: len: 1187 bytes - gob: len: 1972 bytes - json: len: 2538 bytes - .............................................. - PASS - Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op - Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op - Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op - Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op - Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op - Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op - Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op - Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op - Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op - Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op - Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op - Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op - Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op - Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op - ok ugorji.net/codec 30.827s - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext\_dep\_test.go - diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go index 2bb5e8fee85..fd9f489063a 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -1,20 +1,16 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( "math" - // "reflect" - // "sync/atomic" + "reflect" "time" - //"fmt" ) const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
-//var _ = fmt.Printf - // vd as low 4 bits (there are 16 slots) const ( bincVdSpecial byte = iota @@ -59,31 +55,79 @@ const ( // others not currently supported ) +func bincdesc(vd, vs byte) string { + switch vd { + case bincVdSpecial: + switch vs { + case bincSpNil: + return "nil" + case bincSpFalse: + return "false" + case bincSpTrue: + return "true" + case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat: + return "float" + case bincSpZero: + return "uint" + case bincSpNegOne: + return "int" + default: + return "unknown" + } + case bincVdSmallInt, bincVdPosInt: + return "uint" + case bincVdNegInt: + return "int" + case bincVdFloat: + return "float" + case bincVdSymbol: + return "string" + case bincVdString: + return "string" + case bincVdByteArray: + return "bytes" + case bincVdTimestamp: + return "time" + case bincVdCustomExt: + return "ext" + case bincVdArray: + return "array" + case bincVdMap: + return "map" + default: + return "unknown" + } +} + type bincEncDriver struct { - w encWriter + e *Encoder + h *BincHandle + w *encWriterSwitch m map[string]uint16 // symbols - s uint32 // symbols sequencer - b [8]byte + b [16]byte // scratch, used for encoding numbers - bigendian style + s uint16 // symbols sequencer + // c containerState + encDriverTrackContainerWriter + noBuiltInTypes + // encNoSeparator + _ [1]uint64 // padding } -func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) } -func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - bs := encodeTime(v.(time.Time)) +func (e *bincEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else { + bs := bincEncodeTime(t) e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) e.w.writeb(bs) } } -func (e *bincEncDriver) encodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) encodeBool(b bool) { +func (e *bincEncDriver) EncodeBool(b bool) { if b { e.w.writen1(bincVdSpecial<<4 | bincSpTrue) } else { @@ -91,21 +135,21 @@ func (e *bincEncDriver) encodeBool(b bool) { } } -func (e *bincEncDriver) encodeFloat32(f float32) { +func (e *bincEncDriver) EncodeFloat32(f float32) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } e.w.writen1(bincVdFloat<<4 | bincFlBin32) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *bincEncDriver) encodeFloat64(f float64) { +func (e *bincEncDriver) EncodeFloat64(f float64) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } - bigen.PutUint64(e.b[:], math.Float64bits(f)) + bigen.PutUint64(e.b[:8], math.Float64bits(f)) if bincDoPrune { i := 7 for ; i >= 0 && (e.b[i] == 0); i-- { @@ -119,7 +163,7 @@ func (e *bincEncDriver) encodeFloat64(f float64) { } } e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:]) + e.w.writeb(e.b[:8]) } func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { @@ -138,64 +182,71 @@ func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) } } -func (e *bincEncDriver) encodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - switch { - case v >= 0: +func (e *bincEncDriver) EncodeInt(v int64) { + // const nbd byte = bincVdNegInt << 4 + if v >= 0 { e.encUint(bincVdPosInt<<4, true, uint64(v)) - case v == -1: + } else if v == -1 { e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - default: + } else { e.encUint(bincVdNegInt<<4, false, 
uint64(-v)) } } -func (e *bincEncDriver) encodeUint(v uint64) { +func (e *bincEncDriver) EncodeUint(v uint64) { e.encUint(bincVdPosInt<<4, true, v) } func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - switch { - case v == 0: + if v == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZero) - case pos && v >= 1 && v <= 16: + } else if pos && v >= 1 && v <= 16 { e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - case v <= math.MaxUint8: + } else if v <= math.MaxUint8 { e.w.writen2(bd|0x0, byte(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.encIntegerPrune(bd, pos, v, 4) - default: + } else { e.encIntegerPrune(bd, pos, v, 8) } } +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(bincVdCustomExt<<4, uint64(length)) e.w.writen1(xtag) } -func (e *bincEncDriver) encodeArrayPreamble(length int) { +func (e *bincEncDriver) WriteArrayStart(length int) { e.encLen(bincVdArray<<4, uint64(length)) + e.c = containerArrayStart } -func (e *bincEncDriver) encodeMapPreamble(length int) { +func (e *bincEncDriver) WriteMapStart(length int) { e.encLen(bincVdMap<<4, uint64(length)) + e.c = containerMapStart } -func (e *bincEncDriver) encodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeSymbol(v string) { +func (e *bincEncDriver) EncodeSymbol(v string) { // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) + // e.encodeString(cUTF8, v) // return // } @@ -204,12 +255,11 @@ func (e *bincEncDriver) encodeSymbol(v string) { //(bd with embedded length, and single byte for string val). 
l := len(v) - switch l { - case 0: - e.encBytesLen(c_UTF8, 0) + if l == 0 { + e.encBytesLen(cUTF8, 0) return - case 1: - e.encBytesLen(c_UTF8, 1) + } else if l == 1 { + e.encBytesLen(cUTF8, 1) e.w.writen1(v[0]) return } @@ -222,45 +272,72 @@ func (e *bincEncDriver) encodeSymbol(v string) { e.w.writen2(bincVdSymbol<<4, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8) - e.w.writeUint16(ui) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) } } else { e.s++ - ui = uint16(e.s) + ui = e.s //ui = uint16(atomic.AddUint32(&e.s, 1)) e.m[v] = ui var lenprec uint8 - switch { - case l <= math.MaxUint8: + if l <= math.MaxUint8 { // lenprec = 0 - case l <= math.MaxUint16: + } else if l <= math.MaxUint16 { lenprec = 1 - case int64(l) <= math.MaxUint32: + } else if int64(l) <= math.MaxUint32 { lenprec = 2 - default: + } else { lenprec = 3 } if ui <= math.MaxUint8 { e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - e.w.writeUint16(ui) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) } - switch lenprec { - case 0: + if lenprec == 0 { e.w.writen1(byte(l)) - case 1: - e.w.writeUint16(uint16(l)) - case 2: - e.w.writeUint32(uint32(l)) - default: - e.w.writeUint64(uint64(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) } e.w.writestr(v) } } -func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringEnc(c charEncoding, v string) { + if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + l := uint64(len(v)) + e.encLen(bincVdString<<4, l) // e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } + +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + return + } l := uint64(len(v)) e.encBytesLen(c, l) if l > 0 { @@ -268,9 +345,21 @@ func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { } } +func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) { + if v == nil { + e.EncodeNil() + return + } + l := uint64(len(v)) + e.encLen(bincVdByteArray<<4, l) // e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { + if c == cRAW { e.encLen(bincVdByteArray<<4, length) } else { e.encLen(bincVdString<<4, length) @@ -286,93 +375,90 @@ func (e *bincEncDriver) encLen(bd byte, l uint64) { } func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - switch { - case v <= math.MaxUint8: + if v <= math.MaxUint8 { e.w.writen2(bd, byte(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.w.writen1(bd | 0x02) - e.w.writeUint32(uint32(v)) - default: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { e.w.writen1(bd | 0x03) - e.w.writeUint64(uint64(v)) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) } } 
//------------------------------------ +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + type bincDecDriver struct { - r decReader + decDriverNoopContainerReader + noBuiltInTypes + + d *Decoder + h *BincHandle + r *decReaderSwitch + br bool // bytes reader bdRead bool - bdType valueType bd byte vd byte vs byte - b [8]byte - m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) + _ [3]byte // padding + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol + + // noStreamingCodec + // decNoSeparator + + b [(8 + 1) * 8]byte // scratch } -func (d *bincDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *bincDecDriver) readNextBd() { d.bd = d.r.readn1() d.vd = d.bd >> 4 d.vs = d.bd & 0x0f d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *bincDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - d.bdType = valueTypeNil - case bincSpFalse, bincSpTrue: - d.bdType = valueTypeBool - case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: - d.bdType = valueTypeFloat - case bincSpZero: - d.bdType = valueTypeUint - case bincSpNegOne: - d.bdType = valueTypeInt - default: - decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - d.bdType = valueTypeUint - case bincVdPosInt: - d.bdType = valueTypeUint - case bincVdNegInt: - d.bdType = valueTypeInt - case bincVdFloat: - d.bdType = valueTypeFloat - case bincVdString: - d.bdType = valueTypeString - case bincVdSymbol: - d.bdType = valueTypeSymbol - case bincVdByteArray: - d.bdType = valueTypeBytes - case bincVdTimestamp: - d.bdType = valueTypeTimestamp - case bincVdCustomExt: - d.bdType = valueTypeExt - case bincVdArray: - d.bdType = valueTypeArray - case bincVdMap: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) - } +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false } - return d.bdType } -func (d *bincDecDriver) tryDecodeAsNil() bool { +func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } if d.bd == bincVdSpecial<<4|bincSpNil { d.bdRead = false return true @@ -380,24 +466,24 @@ func (d *bincDecDriver) tryDecodeAsNil() bool { return false } -func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - if d.vd != bincVdTimestamp { - decErr("Invalid d.vd. Expecting 0x%x. 
Received: 0x%x", bincVdTimestamp, d.vd) - } - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt +func (d *bincDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { d.bdRead = false + return } + if d.vd != bincVdTimestamp { + d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + t, err := bincDecodeTime(d.r.readx(uint(d.vs))) + if err != nil { + panic(err) + } + d.bdRead = false + return } func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { @@ -406,7 +492,8 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } else { l := d.r.readn1() if l > 8 { - decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l) + return } for i := l; i < 8; i++ { d.b[i] = 0 @@ -416,16 +503,17 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(d.r.readUint64()); break; } - switch vs := d.vs; vs & 0x7 { - case bincFlBin32: - d.decFloatPre(vs, 4) + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - case bincFlBin64: - d.decFloatPre(vs, 8) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - default: - decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } else { + d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", + msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } return } @@ -436,130 +524,148 @@ func (d *bincDecDriver) decUint() (v uint64) { case 0: v = uint64(d.r.readn1()) case 1: - d.r.readb(d.b[6:]) - v = uint64(bigen.Uint16(d.b[6:])) + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) case 2: d.b[4] = 0 - d.r.readb(d.b[5:]) - v = uint64(bigen.Uint32(d.b[4:])) + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) case 3: - d.r.readb(d.b[4:]) - v = uint64(bigen.Uint32(d.b[4:])) + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:]) - for i := 0; i < lim; i++ { + lim := 7 - d.vs + d.r.readb(d.b[lim:8]) + for i := uint8(0); i < lim; i++ { d.b[i] = 0 } - v = uint64(bigen.Uint64(d.b[:])) + v = uint64(bigen.Uint64(d.b[:8])) case 7: - d.r.readb(d.b[:]) - v = uint64(bigen.Uint64(d.b[:])) + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) default: - decErr("unsigned integers with greater than 64 bits of precision not supported") + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return } return } -func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.vd { - case bincVdPosInt: +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { ui = d.decUint() - i = int64(ui) - case bincVdNegInt: + } else if vd == bincVdNegInt { ui = d.decUint() - i = -(int64(ui)) neg = true - case bincVdSmallInt: - i = int64(d.vs) + 1 + } else if vd == bincVdSmallInt { ui = uint64(d.vs) + 1 - case bincVdSpecial: - switch d.vs { - case bincSpZero: + } else if vd == bincVdSpecial { + if vs == bincSpZero { //i = 0 - case 
bincSpNegOne: + } else if vs == bincSpNegOne { neg = true ui = 1 - i = -1 - default: - decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } else { + d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } - default: - decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } else { + d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return } return } -func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) +func (d *bincDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } d.bdRead = false return } -func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() +func (d *bincDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value to unsigned integer type") + return } - checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.vd { - case bincVdSpecial: +func (d *bincDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { d.bdRead = false - switch d.vs { - case bincSpNan: + if vs == bincSpNan { return math.NaN() - case bincSpPosInf: + } else if vs == bincSpPosInf { return math.Inf(1) - case bincSpZeroFloat, bincSpZero: + } else if vs == bincSpZeroFloat || vs == bincSpZero { return - case bincSpNegInf: + } else if vs == bincSpNegInf { return math.Inf(-1) - default: - decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } else { + d.d.errorf("float - invalid special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } - case bincVdFloat: + } else if vd == bincVdFloat { f = d.decFloat() - default: - _, i, _ := d.decIntAny() - f = float64(i) + } else { + f = float64(d.DecodeInt64()) } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). -func (d *bincDecDriver) decodeBool() (b bool) { - switch d.bd { - case (bincVdSpecial | bincSpFalse): +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { // b = false - case (bincVdSpecial | bincSpTrue): + } else if bd == (bincVdSpecial | bincSpTrue) { b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else { + d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) readMapLen() (length int) { +func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdMap { - decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } length = d.decLen() d.bdRead = false return } -func (d *bincDecDriver) readArrayLen() (length int) { +func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdArray { - decErr("Invalid d.vd for array. Expecting 0x%x. 
Got: 0x%x", bincVdArray, d.vd) + d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } length = d.decLen() d.bdRead = false @@ -567,189 +673,299 @@ func (d *bincDecDriver) readArrayLen() (length int) { } func (d *bincDecDriver) decLen() int { - if d.vs <= 3 { - return int(d.decUint()) + if d.vs > 3 { + return int(d.vs - 4) } - return int(d.vs - 4) + return int(d.decLenNumber()) } -func (d *bincDecDriver) decodeString() (s string) { +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) ( + bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen = -1 + // var ok bool switch d.vd { case bincVdString, bincVdByteArray: - if length := d.decLen(); length > 0 { - s = string(d.r.readn(length)) + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(uint(slen)) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) } case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, //extract symbol //if containsStringVal, read it and put in map //else look in map for string value - var symbol uint32 + var symbol uint16 vs := d.vs - //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) if vs&0x8 == 0 { - symbol = uint32(d.r.readn1()) + symbol = uint16(d.r.readn1()) } else { - symbol = uint32(d.r.readUint16()) + symbol = uint16(bigen.Uint16(d.r.readx(2))) } - if d.m == nil { - d.m = make(map[uint32]string, 16) + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) } if vs&0x4 == 0 { - s = d.m[symbol] + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } } else { - var slen int switch vs & 0x3 { case 0: slen = int(d.r.readn1()) case 1: - slen = int(d.r.readUint16()) + slen = int(bigen.Uint16(d.r.readx(2))) case 2: - slen = int(d.r.readUint32()) + slen = int(bigen.Uint32(d.r.readx(4))) case 3: - slen = int(d.r.readUint64()) + slen = int(bigen.Uint64(d.r.readx(8))) } - s = string(d.r.readn(slen)) - d.m[symbol] = s + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) } default: - decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. 
Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. + // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.vd == bincVdArray { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } var clen int - switch d.vd { - case bincVdString, bincVdByteArray: + if d.vd == bincVdString || d.vd == bincVdByteArray { clen = d.decLen() - default: - decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) + } else { + d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } return } -func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.vd { - case bincVdCustomExt: +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) + return } - xbs = d.r.readn(l) - case bincVdByteArray: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) + if d.br { + xbs = d.r.readx(uint(l)) + } else { + xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, true) + } else { + d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", + msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool switch d.vd { case bincVdSpecial: switch d.vs { case bincSpNil: - vt = valueTypeNil + n.v = valueTypeNil case bincSpFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case bincSpTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case bincSpNan: - vt = valueTypeFloat - v = math.NaN() + n.v = valueTypeFloat + n.f = math.NaN() case bincSpPosInf: - vt = valueTypeFloat - v = math.Inf(1) + n.v = valueTypeFloat + n.f = math.Inf(1) case bincSpNegInf: - vt = valueTypeFloat - v = math.Inf(-1) + n.v = valueTypeFloat + n.f = math.Inf(-1) case bincSpZeroFloat: - vt = valueTypeFloat - v = float64(0) + n.v = valueTypeFloat + n.f = float64(0) case bincSpZero: - vt = valueTypeUint - v = int64(0) // int8(0) + n.v = valueTypeUint + n.u = uint64(0) // int8(0) case bincSpNegOne: - vt = valueTypeInt - v = int64(-1) // int8(-1) + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) default: - decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) } case bincVdSmallInt: - vt = valueTypeUint - v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 case bincVdPosInt: - vt = valueTypeUint - v = d.decUint() + n.v = valueTypeUint + n.u = d.decUint() case bincVdNegInt: - vt = valueTypeInt - v = -(int64(d.decUint())) + n.v = valueTypeInt + n.i = -(int64(d.decUint())) case bincVdFloat: - vt = valueTypeFloat - v = d.decFloat() + n.v = valueTypeFloat + n.f = d.decFloat() case bincVdSymbol: - vt = valueTypeSymbol - v = d.decodeString() + n.v = valueTypeSymbol + n.s = d.DecodeString() case bincVdString: - vt = valueTypeString - v = d.decodeString() + n.v = valueTypeString + n.s = d.DecodeString() case bincVdByteArray: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case bincVdTimestamp: - vt = valueTypeTimestamp - tt, err := decodeTime(d.r.readn(int(d.vs))) + n.v = valueTypeTime + tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) if err != nil { panic(err) } - v = tt + n.t = tt case bincVdCustomExt: - vt = valueTypeExt + n.v = valueTypeExt l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt + n.u = uint64(d.r.readn1()) + if d.br { + n.l = d.r.readx(uint(l)) + } else { + n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } case bincVdArray: - vt = valueTypeArray + n.v = valueTypeArray decodeFurther = true case bincVdMap: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } if !decodeFurther { d.bdRead = false } - return + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } } 
//------------------------------------ @@ -764,23 +980,224 @@ func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurthe // extended precision and decimal IEEE 754 floats are unsupported. // - Only UTF-8 strings supported. // Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// //Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. type BincHandle struct { BasicHandle + binaryEncodingType + noElemSeparators + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. + // + // Values: + // - 0: default: library uses best judgement + // - 1: use symbols + // - 2: do not use symbols + AsSymbols uint8 + + // AsSymbols: may later on introduce more options ... + // - m: map keys + // - s: struct fields + // - n: none + // - a: all: same as m, s, ... + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: binc +func (h *BincHandle) Name() string { return "binc" } + +// SetBytesExt sets an extension +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, h: h, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (h *BincHandle) newEncDriver(w encWriter) encDriver { - return &bincEncDriver{w: w} +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.c = 0 + e.m = nil } -func (h *BincHandle) newDecDriver(r decReader) decDriver { - return &bincDecDriver{r: r} +func (d *bincDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 } -func (_ *BincHandle) writeExt() bool { - return true +// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. 
+// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +// +func bincEncodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] } -func (h *BincHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle +// bincDecodeTime decodes a []byte into a time.Time. +func bincDecodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printf.d. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + // i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. 
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return } + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/build.sh b/vendor/github.com/hashicorp/go-msgpack/codec/build.sh new file mode 100644 index 00000000000..dd79c13f81a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/build.sh @@ -0,0 +1,267 @@ +#!/bin/bash + +# Run all the different permutations of all the tests and other things +# This helps ensure that nothing gets broken. + +_tests() { + local gover=$( go version | cut -f 3 -d ' ' ) + # note that codecgen requires fastpath, so you cannot do "codecgen notfastpath" + local a=( "" "safe" "notfastpath" "notfastpath safe" "codecgen" "codecgen safe" ) + for i in "${a[@]}" + do + echo ">>>> TAGS: $i" + local i2=${i:-default} + case $gover in + go1.[0-6]*) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "$i" "$@" ;; + *) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;; + esac + if [[ "$?" != 0 ]]; then return 1; fi + done + echo "++++++++ TEST SUITES ALL PASSED ++++++++" +} + + +# is a generation needed? +_ng() { + local a="$1" + if [[ ! -e "$a" ]]; then echo 1; return; fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]]; then echo 1; return; fi + done +} + +_prependbt() { + cat > ${2} <> ${2} + rm -f ${1} +} + +# _build generates fast-path.go and gen-helper.go. +_build() { + if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi + + if [ "${zbak}" ]; then + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak + fi + rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \ + *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < " + fnameOut + " ______") +fin, err := os.Open(fnameIn) +if err != nil { panic(err) } +defer fin.Close() +fout, err := os.Create(fnameOut) +if err != nil { panic(err) } +defer fout.Close() +err = codec.GenInternalGoFile(fin, fout) +if err != nil { panic(err) } +} + +func main() { +run("fast-path.go.tmpl", "fast-path.generated.go") +run("gen-helper.go.tmpl", "gen-helper.generated.go") +run("mammoth-test.go.tmpl", "mammoth_generated_test.go") +run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go") +} +EOF + + sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . 
"github.com/ugorji/go/codec"+' \ + shared_test.go > bench/shared_test.go + + # explicitly return 0 if this passes, else return 1 + go run -tags "notfastpath safe codecgen.exec" gen-from-tmpl.generated.go && + rm -f gen-from-tmpl.*generated.go && + return 0 + return 1 +} + +_codegenerators() { + local c5="_generated_test.go" + local c7="$PWD/codecgen" + local c8="$c7/__codecgen" + local c9="codecgen-scratch.go" + + if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi + + # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen + true && + echo "codecgen ... " && + if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then + echo "rebuilding codecgen ... " && ( cd codecgen && go build -o $c8 ${zargs[*]} . ) + fi && + $c8 -rt codecgen -t 'codecgen generated' -o values_codecgen${c5} -d 19780 $zfin $zfin2 && + cp mammoth2_generated_test.go $c9 && + $c8 -t '!notfastpath' -o mammoth2_codecgen${c5} -d 19781 mammoth2_generated_test.go && + rm -f $c9 && + echo "generators done!" +} + +_prebuild() { + echo "prebuild: zforce: $zforce" + local d="$PWD" + zfin="test_values.generated.go" + zfin2="test_values_flex.generated.go" + zpkg="github.com/ugorji/go/codec" + # zpkg=${d##*/src/} + # zgobase=${d%%/src/*} + # rm -f *_generated_test.go + rm -f codecgen-*.go && + _build && + cp $d/values_test.go $d/$zfin && + cp $d/values_flex_test.go $d/$zfin2 && + _codegenerators && + if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi && + if [[ $zforce ]]; then go install ${zargs[*]} .; fi && + echo "prebuild done successfully" + rm -f $d/$zfin $d/$zfin2 + unset zfin zfin2 zpkg +} + +_make() { + zforce=1 + (cd codecgen && go install ${zargs[*]} .) && _prebuild && go install ${zargs[*]} . + unset zforce +} + +_clean() { + rm -f gen-from-tmpl.*generated.go \ + codecgen-*.go \ + test_values.generated.go test_values_flex.generated.go +} + +_release() { + local reply + read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply + echo + if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi + + # expects GOROOT, GOROOT_BOOTSTRAP to have been set. + if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi + # (cd $GOROOT && git checkout -f master && git pull && git reset --hard) + (cd $GOROOT && git pull) + local f=`pwd`/make.release.out + cat > $f <>$f + if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi + (false || + (echo "===== BUILDING GO SDK for branch: $i ... =====" && + cd $GOROOT && + git checkout -f $i && git reset --hard && git clean -f . && + cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && + echo "===== GO SDK BUILD DONE =====" && + _prebuild && + echo "===== PREBUILD DONE with exit: $? =====" && + _tests "$@" + if [[ "$?" 
!= 0 ]]; then return 1; fi + done + unset zforce + echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" +} + +_usage() { + cat < [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector] + -v -> verbose +EOF + if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi +} + +_main() { + if [[ -z "$1" ]]; then _usage; return 1; fi + local x + unset zforce + zargs=() + zbenchflags="" + OPTIND=1 + while getopts ":ctmnrgpfvlzdb:" flag + do + case "x$flag" in + 'xf') zforce=1 ;; + 'xv') zverbose=1 ;; + 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; + 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; + 'xd') zargs+=("-race") ;; + 'xb') x='b'; zbenchflags=${OPTARG} ;; + x\?) _usage; return 1 ;; + *) x=$flag ;; + esac + done + shift $((OPTIND-1)) + # echo ">>>> _main: extra args: $@" + case "x$x" in + 'xt') _tests "$@" ;; + 'xm') _make "$@" ;; + 'xr') _release "$@" ;; + 'xg') _go ;; + 'xp') _prebuild "$@" ;; + 'xc') _clean "$@" ;; + 'xz') _analyze "$@" ;; + 'xb') _bench "$@" ;; + esac + unset zforce zargs zbenchflags +} + +[ "." = `dirname $0` ] && _main "$@" + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go b/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go new file mode 100644 index 00000000000..7833f9d68f0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go @@ -0,0 +1,767 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString byte = 0x7f + cborBdIndefiniteArray byte = 0x9f + cborBdIndefiniteMap byte = 0xbf + cborBdBreak byte = 0xff +) + +// These define some in-stream descriptors for +// manual encoding e.g. 
when doing explicit indefinite-length +const ( + CborStreamBytes byte = 0x5f + CborStreamString byte = 0x7f + CborStreamArray byte = 0x9f + CborStreamMap byte = 0xbf + CborStreamBreak byte = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt byte = 0x20 + cborBaseBytes byte = 0x40 + cborBaseString byte = 0x60 + cborBaseArray byte = 0x80 + cborBaseMap byte = 0xa0 + cborBaseTag byte = 0xc0 + cborBaseSimple byte = 0xe0 +) + +func cbordesc(bd byte) string { + switch bd { + case cborBdNil: + return "nil" + case cborBdFalse: + return "false" + case cborBdTrue: + return "true" + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + return "float" + case cborBdIndefiniteBytes: + return "bytes*" + case cborBdIndefiniteString: + return "string*" + case cborBdIndefiniteArray: + return "array*" + case cborBdIndefiniteMap: + return "map*" + default: + switch { + case bd >= cborBaseUint && bd < cborBaseNegInt: + return "(u)int" + case bd >= cborBaseNegInt && bd < cborBaseBytes: + return "int" + case bd >= cborBaseBytes && bd < cborBaseString: + return "bytes" + case bd >= cborBaseString && bd < cborBaseArray: + return "string" + case bd >= cborBaseArray && bd < cborBaseMap: + return "array" + case bd >= cborBaseMap && bd < cborBaseTag: + return "map" + case bd >= cborBaseTag && bd < cborBaseSimple: + return "ext" + default: + return "unknown" + } + } +} + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + e *Encoder + w *encWriterSwitch + h *CborHandle + x [8]byte + // _ [3]uint64 // padding +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else if e.h.TimeRFC3339 { + e.encUint(0, cborBaseTag) + e.EncodeStringEnc(cUTF8, t.Format(time.RFC3339Nano)) + } else { + e.encUint(1, cborBaseTag) + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + e.EncodeInt(sec) + } else { + e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) + } + } +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + 
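As a quick aside (an illustrative sketch, not part of the vendored file): the encUint helper above packs every CBOR item head as a major-type base plus "additional information", per RFC 7049 section 2.1. The standalone snippet below mirrors that logic for the 1- and 2-byte argument forms so the byte layout is easy to see; cborHead is a hypothetical name used only here.

package main

import "fmt"

// cborHead mirrors the head-byte packing done by encUint: fold small values
// into the initial byte, otherwise emit an "additional info" marker followed
// by a big-endian argument. Only the forms up to uint16 are shown here.
func cborHead(v uint64, base byte) []byte {
	switch {
	case v <= 0x17:
		return []byte{base + byte(v)} // value fits in the initial byte
	case v <= 0xff:
		return []byte{base + 0x18, byte(v)} // 1-byte argument
	case v <= 0xffff:
		return []byte{base + 0x19, byte(v >> 8), byte(v)} // 2-byte big-endian argument
	default:
		return nil // 4- and 8-byte forms omitted for brevity
	}
}

func main() {
	fmt.Printf("% x\n", cborHead(10, 0x00))  // 0a        (major type 0, value 10)
	fmt.Printf("% x\n", cborHead(500, 0x00)) // 19 01 f4  (uint16 argument, 500 == 0x01f4)
}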
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + // only encodes re.Value (never re.Data) + // if false && re.Data != nil { + // en.encode(re.Data) + // } else if re.Value != nil { + if re.Value != nil { + en.encode(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + } else if c == cRAW { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } else { + e.encStringBytesS(cborBaseString, stringView(v)) + } +} + +func (e *cborEncDriver) EncodeStringBytesRaw(v []byte) { + if v == nil { + e.EncodeNil() + } else { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } +} + +func (e *cborEncDriver) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + var vlen uint = uint(len(v)) + blen := vlen / 4 + if blen == 0 { + blen = 64 + } else if blen > 1024 { + blen = 1024 + } + for i := uint(0); i < vlen; { + var v2 string + i2 := i + blen + if i2 < vlen { + v2 = v[i:i2] + } else { + v2 = v[i:] + } + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r *decReaderSwitch + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) 
CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", + major, d.bd, cbordesc(d.bd)) + return + } + return +} + +func (d *cborDecDriver) DecodeInt64() (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + if neg { + i = -(chkOvf.SignedIntV(ui + 1)) + } else { + i = chkOvf.SignedIntV(ui) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint64() (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt64()) + } else { + d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd)) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+ + " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd)) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) 
+ d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + d.bdRead = false + if bs == nil { + if zerocopy { + return d.decAppendIndefiniteBytes(d.d.b[:0]) + } + return d.decAppendIndefiniteBytes(zeroByteSlice) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *cborDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return + } + xtag := d.decUint() + d.bdRead = false + return d.decodeTime(xtag) +} + +func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + switch xtag { + case 0: + var err error + if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil { + d.d.errorv(err) + } + case 1: + // decode an int64 or a float, and infer time.Time from there. + // for floats, round to microseconds, as that is what is guaranteed to fit well. + switch { + case d.bd == cborBdFloat16, d.bd == cborBdFloat32: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd == cborBdFloat64: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt, + d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + t = time.Unix(d.DecodeInt64(), 0) + default: + d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)") + } + default: + d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + } + t = t.UTC().Round(time.Microsecond) + return +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case cborBdIndefiniteBytes: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + if n.u == 0 || n.u == 1 { + d.bdRead = false + n.v = valueTypeTime + n.t = d.decodeTime(n.u) + } + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, +// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +type CborHandle struct { + binaryEncodingType + noElemSeparators + BasicHandle + + // IndefiniteLength=true, means that we encode using indefinitelength + IndefiniteLength bool + + // TimeRFC3339 says to encode time.Time using RFC3339 format. + // If unset, we encode time.Time using seconds past epoch. 
+ TimeRFC3339 bool + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: cbor +func (h *CborHandle) Name() string { return "cbor" } + +// SetInterfaceExt sets an extension +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go b/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go new file mode 100644 index 00000000000..cc5ecec6dbb --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go @@ -0,0 +1,13 @@ +// +build codecgen generated + +package codec + +// this file is here, to set the codecgen variable to true +// when the build tag codecgen is set. +// +// this allows us do specific things e.g. skip missing fields tests, +// when running in codecgen mode. + +func init() { + codecgen = true +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go index 851b54ac7e7..27d362012d1 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -1,578 +1,2439 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( + "encoding" + "errors" + "fmt" "io" "reflect" - // "runtime/debug" + "runtime" + "strconv" + "time" ) // Some tagging information for error messages. const ( - msgTagDec = "codec.decoder" - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" + msgBadDesc = "unrecognized descriptor byte" + // msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" ) +const ( + decDefMaxDepth = 1024 // maximum depth + decDefSliceCap = 8 + decDefChanCap = 64 // should be large, as cap cannot be expanded + decScratchByteArrayLen = cacheLineSize // + (8 * 2) // - (8 * 1) +) + +var ( + errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" + errstrCannotDecodeIntoNil = "cannot decode into nil" + + errmsgExpandSliceOverflow = "expand slice: slice overflow" + errmsgExpandSliceCannotChange = "expand slice: cannot change" + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") + errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") +) + +/* + // decReader abstracts the reading source, allowing implementations that can // read from an io.Reader or directly off a byte slice with zero-copying. 
+// +// Deprecated: Use decReaderSwitch instead. type decReader interface { - readn(n int) []byte + unreadn1() + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte readb([]byte) readn1() uint8 - readUint16() uint16 - readUint32() uint32 - readUint64() uint64 + numread() uint // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) } +*/ + type decDriver interface { - initReadNext() - tryDecodeAsNil() bool - currentEncodedType() valueType - isBuiltinType(rt uintptr) bool - decodeBuiltin(rt uintptr, v interface{}) - //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - decodeNaked() (v interface{}, vt valueType, decodeFurther bool) - decodeInt(bitsize uint8) (i int64) - decodeUint(bitsize uint8) (ui uint64) - decodeFloat(chkOverflow32 bool) (f float64) - decodeBool() (b bool) - // decodeString can also decode symbols - decodeString() (s string) - decodeBytes(bs []byte) (bsOut []byte, changed bool) - decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - readMapLen() int - readArrayLen() int + // this will check if the next token is a break. + CheckBreak() bool + // TryDecodeAsNil tries to decode as nil. + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // ContainerType returns one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + + // Deprecated: use DecodeInt64 and DecodeUint64 instead + // DecodeInt(bitsize uint8) (i int64) + // DecodeUint(bitsize uint8) (ui uint64) + + DecodeInt64() (i int64) + DecodeUint64() (ui uint64) + + DecodeFloat64() (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. 
+ DecodeString() (s string) + DecodeStringAsBytes() (v []byte) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. + DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + + DecodeTime() (t time.Time) + + ReadArrayStart() int + ReadArrayElem() + ReadArrayEnd() + ReadMapStart() int + ReadMapElemKey() + ReadMapElemValue() + ReadMapEnd() + + reset() + uncacheRead() +} + +type decodeError struct { + codecError + pos int +} + +func (d decodeError) Error() string { + return fmt.Sprintf("%s decode error [pos %d]: %v", d.name, d.pos, d.err) } +type decDriverNoopContainerReader struct{} + +func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadArrayElem() {} +func (x decDriverNoopContainerReader) ReadArrayEnd() {} +func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadMapElemKey() {} +func (x decDriverNoopContainerReader) ReadMapElemValue() {} +func (x decDriverNoopContainerReader) ReadMapEnd() {} +func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } + +// func (x decNoSeparator) uncacheRead() {} + +// DecodeOptions captures configuration options during decode. type DecodeOptions struct { - // An instance of MapType is used during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true, + // else map[interface{}]interface{}. MapType reflect.Type - // An instance of SliceType is used during schema-less decoding of an array in the stream. - // If nil, we use []interface{} + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil (unset), we default to []interface{} for all formats. SliceType reflect.Type - // ErrorIfNoField controls whether an error is returned when decoding a map + + // MaxInitLen defines the maxinum initial length that we "make" a collection + // (string, slice, map, chan). If 0 or negative, we default to a sensible value + // based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int + + // MaxDepth defines the maximum depth when decoding nested + // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024). + MaxDepth int16 + + // If ErrorIfNoField, return an error when decoding a map // from a codec stream into a struct, and no matching struct field is found. ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. 
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. + // By default, they are decoded as []byte, but can be decoded as string (if configured). + RawToString bool } -// ------------------------------------ +// ------------------------------------------------ + +type unreadByteStatus uint8 + +// unreadByteStatus goes from +// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... 
+const ( + unreadByteUndefined unreadByteStatus = iota + unreadByteCanRead + unreadByteCanUnread +) + +type ioDecReaderCommon struct { + r io.Reader // the reader passed in + + n uint // num read -// ioDecReader is a decReader that reads off an io.Reader + l byte // last byte + ls unreadByteStatus // last byte status + trb bool // tracking bytes turned on + _ bool + b [4]byte // tiny buffer for reading single bytes + + tr []byte // tracking bytes read +} + +func (z *ioDecReaderCommon) reset(r io.Reader) { + z.r = r + z.ls = unreadByteUndefined + z.l, z.n = 0, 0 + z.trb = false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *ioDecReaderCommon) numread() uint { + return z.n +} + +func (z *ioDecReaderCommon) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReaderCommon) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. type ioDecReader struct { - r io.Reader - br io.ByteReader - x [8]byte //temp byte array re-used internally for efficiency + ioDecReaderCommon + + rr io.Reader + br io.ByteScanner + + x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc + _ [1]uint64 // padding } -func (z *ioDecReader) readn(n int) (bs []byte) { - if n <= 0 { +func (z *ioDecReader) reset(r io.Reader) { + z.ioDecReaderCommon.reset(r) + + var ok bool + z.rr = r + z.br, ok = r.(io.ByteScanner) + if !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { return } - bs = make([]byte, n) - if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { + var firstByte bool + if z.ls == unreadByteCanRead { + z.ls = unreadByteCanUnread + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = unreadByteCanUnread + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case unreadByteCanUnread: + z.ls = unreadByteCanRead + case unreadByteCanRead: + err = errDecUnreadByteLastByteNotRead + case unreadByteUndefined: + err = errDecUnreadByteNothingToRead + default: + err = errDecUnreadByteUnknown + } + return +} + +func (z *ioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + return + } + if n < uint(len(z.x)) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) + } return } func (z *ioDecReader) readb(bs []byte) { - if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { + if len(bs) == 0 { + return + } + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } } -func (z *ioDecReader) readn1() uint8 { - if z.br != nil { - b, err := z.br.ReadByte() - if err != nil { - panic(err) +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) } - return b + } else if err == io.EOF { + eof = true + } else { + panic(err) } - z.readb(z.x[:1]) - return z.x[0] + return +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + var eof bool + // for { + // token, eof = z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // continue + // } + // return + // } +LOOP: + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + goto LOOP + } + return } -func (z *ioDecReader) readUint16() uint16 { - z.readb(z.x[:2]) - return bigen.Uint16(z.x[:2]) +func (z *ioDecReader) readTo(in []byte, accept *bitset256) []byte { + // out = in + + // for { + // token, eof := z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // out = append(out, token) + // } else { + // z.unreadn1() + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + return in + } + if accept.isset(token) { + // out = append(out, token) + in = append(in, token) + goto LOOP + } + z.unreadn1() + return in } -func (z *ioDecReader) readUint32() uint32 { - z.readb(z.x[:4]) - return bigen.Uint32(z.x[:4]) +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + // for { + // token, eof := z.readn1eof() + // if eof { + // panic(io.EOF) + // } + // out = append(out, token) + // if token == stop { + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + goto LOOP } -func (z *ioDecReader) readUint64() uint64 { - z.readb(z.x[:8]) - return bigen.Uint64(z.x[:8]) +//go:noinline +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } } // ------------------------------------ -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available +type bufioDecReader struct { + ioDecReaderCommon + + c uint // cursor + buf []byte + + bytesBufPooler + + // err error + + // Extensions can call Decode() within a current Decode() call. + // We need to know when the top level Decode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. + + _ [6]uint8 // padding + + _ [1]uint64 // padding } -func (z *bytesDecReader) consume(n int) (oldcursor int) { - if z.a == 0 { - panic(io.EOF) +func (z *bufioDecReader) reset(r io.Reader, bufsize int) { + z.ioDecReaderCommon.reset(r) + z.c = 0 + z.calls = 0 + if cap(z.buf) >= bufsize { + z.buf = z.buf[:0] + } else { + z.buf = z.bytesBufPooler.get(bufsize)[:0] + // z.buf = make([]byte, 0, bufsize) } - if n > z.a { - decErr("Trying to read %v bytes. 
Only %v available", n, z.a) +} + +func (z *bufioDecReader) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +func (z *bufioDecReader) readb(p []byte) { + var n = uint(copy(p, z.buf[z.c:])) + z.n += n + z.c += n + if len(p) == int(n) { + if z.trb { + z.tr = append(z.tr, p...) // cost=9 + } + } else { + z.readbFill(p, n) } - // z.checkAvailable(n) - oldcursor = z.c - z.c = oldcursor + n - z.a = z.a - n - return } -func (z *bytesDecReader) readn(n int) (bs []byte) { - if n <= 0 { +//go:noinline - fallback when z.buf is consumed +func (z *bufioDecReader) readbFill(p0 []byte, n uint) { + // at this point, there's nothing in z.buf to read (z.buf is fully consumed) + p := p0[n:] + var n2 uint + var err error + if len(p) > cap(z.buf) { + n2, err = decReadFull(z.r, p) + if err != nil { + panic(err) + } + n += n2 + z.n += n2 + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } return } - c0 := z.consume(n) - bs = z.b[c0:z.c] - return + // z.c is now 0, and len(p) <= cap(z.buf) +LOOP: + // for len(p) > 0 && z.err == nil { + if len(p) > 0 { + z.buf = z.buf[0:cap(z.buf)] + var n1 int + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + n2 = uint(copy(p, z.buf)) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + goto LOOP + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } } -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readn(len(bs))) +func (z *bufioDecReader) readn1() (b byte) { + // fast-path, so we elide calling into Read() most of the time + if z.c < uint(len(z.buf)) { + b = z.buf[z.c] + z.c++ + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else { // meaning z.c == len(z.buf) or greater ... so need to fill + z.readbFill(z.b[:1], 0) + b = z.b[0] + } + return } -func (z *bytesDecReader) readn1() uint8 { - c0 := z.consume(1) - return z.b[c0] +func (z *bufioDecReader) unreadn1() { + if z.c == 0 { + panic(errDecUnreadByteNothingToRead) + } + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } } -// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits -// creating temp slice variable and copying it to helper function is expensive -// for just 2 bits. +func (z *bufioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + // return + } else if z.c+n <= uint(len(z.buf)) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + } else { + bs = make([]byte, n) + // n no longer used - can reuse + n = uint(copy(bs, z.buf[z.c:])) + z.n += n + z.c += n + z.readbFill(bs, n) + } + return +} -func (z *bytesDecReader) readUint16() uint16 { - c0 := z.consume(2) - return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +//go:noinline - track called by Decoder.nextValueBytes() (called by jsonUnmarshal,rawBytes) +func (z *bufioDecReader) doTrack(y uint) { + z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14??? } -func (z *bytesDecReader) readUint32() uint32 { - c0 := z.consume(4) - return bigen.Uint32(z.b[c0:z.c]) +func (z *bufioDecReader) skipLoopFn(i uint) { + z.n += (i - z.c) - 1 + i++ + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) 
+ z.doTrack(i) + } + z.c = i } -func (z *bytesDecReader) readUint64() uint64 { - c0 := z.consume(8) - return bigen.Uint64(z.b[c0:z.c]) +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + // token, _ = z.search(nil, accept, 0, 1); return + + // for i := z.c; i < len(z.buf); i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + // inline z.skipLoopFn(i) and refactor, so cost is within inline budget + token = z.buf[i] + i++ + if accept.isset(token) { + goto LOOP + } + z.n += i - 2 - z.c + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + return z.skipFill(accept) } -// ------------------------------------ +func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) { + z.n += uint(len(z.buf)) - z.c + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + var i int + for i, token = range z.buf { + if !accept.isset(token) { + z.skipLoopFn(uint(i)) + return + } + } + // for i := 0; i < n2; i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} -// decFnInfo has methods for registering handling decoding of a specific type -// based on some characteristics (builtin, extension, reflect Kind, etc) -type decFnInfo struct { - ti *typeInfo - d *Decoder - dd decDriver - xfFn func(reflect.Value, []byte) error - xfTag byte - array bool +func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) { + // out0 is never nil + z.n += (i - z.c) - 1 + out = append(out0, z.buf[z.c:i]...) + if z.trb { + z.doTrack(i) + } + z.c = i + return } -func (f *decFnInfo) builtin(rv reflect.Value) { - f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + // _, out = z.search(in, accept, 0, 2); return + + // for i := z.c; i < len(z.buf); i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // inline readToLoopFn here (for performance) + z.n += (i - z.c) - 1 + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readToFill(in, accept) } -func (f *decFnInfo) rawExt(rv reflect.Value) { - xtag, xbs := f.dd.decodeExt(false, 0) - rv.Field(0).SetUint(uint64(xtag)) - rv.Field(1).SetBytes(xbs) +func (z *bufioDecReader) readToFill(in []byte, accept *bitset256) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + if err == io.EOF { + return // readTo should read until it matches or end is reached + } + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if !accept.isset(token) { + return z.readToLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) 
+ } + } } -func (f *decFnInfo) ext(rv reflect.Value) { - _, xbs := f.dd.decodeExt(true, f.xfTag) - if fnerr := f.xfFn(rv, xbs); fnerr != nil { - panic(fnerr) +func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) { + z.n += (i - z.c) - 1 + i++ + out = append(out0, z.buf[z.c:i]...) + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) } + z.c = i + return } -func (f *decFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryUnmarshaler - if f.ti.unmIndir == -1 { - bm = rv.Addr().Interface().(binaryUnmarshaler) - } else if f.ti.unmIndir == 0 { - bm = rv.Interface().(binaryUnmarshaler) - } else { - for j, k := int8(0), f.ti.unmIndir; j < k; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + // _, out = z.search(in, nil, stop, 4); return + + // for i := z.c; i < len(z.buf); i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if z.buf[i] == stop { + // inline readUntilLoopFn + // return z.readUntilLoopFn(i, nil) + z.n += (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) } - rv = rv.Elem() + z.c = i + return } - bm = rv.Interface().(binaryUnmarshaler) - } - xbs, _ := f.dd.decodeBytes(nil) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) + i++ + goto LOOP } + return z.readUntilFill(in, stop) } -func (f *decFnInfo) kErr(rv reflect.Value) { - decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +func (z *bufioDecReader) readUntilFill(in []byte, stop byte) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n1 int + var n2 uint + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if token == stop { + return z.readUntilLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += n2 + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } } -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.dd.decodeString()) +// ------------------------------------ + +var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c uint // cursor + t uint // track start + // a int // available } -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.dd.decodeBool()) +func (z *bytesDecReader) reset(in []byte) { + z.b = in + // z.a = len(in) + z.c = 0 + z.t = 0 } -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(intBitsize)) +func (z *bytesDecReader) numread() uint { + return z.c } -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(64)) +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(errBytesDecReaderCannotUnread) + } + z.c-- + // z.a++ } -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(32)) +func (z *bytesDecReader) readx(n uint) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. 
+ // However, we do it only once, and it's better than reslicing both z.b and return value. + + // if n <= 0 { + // } else if z.a == 0 { + // panic(io.EOF) + // } else if n > z.a { + // panic(io.ErrUnexpectedEOF) + // } else { + // c0 := z.c + // z.c = c0 + n + // z.a = z.a - n + // bs = z.b[c0:z.c] + // } + // return + + if n != 0 { + z.c += n + if z.c > uint(len(z.b)) { + z.c = uint(len(z.b)) + panic(io.EOF) + } + bs = z.b[z.c-n : z.c] + } + return + + // if n == 0 { + // } else if z.c+n > uint(len(z.b)) { + // z.c = uint(len(z.b)) + // panic(io.EOF) + // } else { + // z.c += n + // bs = z.b[z.c-n : z.c] + // } + // return + + // if n == 0 { + // return + // } + // if z.c == uint(len(z.b)) { + // panic(io.EOF) + // } + // if z.c+n > uint(len(z.b)) { + // panic(io.ErrUnexpectedEOF) + // } + // // z.a -= n + // z.c += n + // return z.b[z.c-n : z.c] } -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(8)) +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(uint(len(bs)))) } -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(16)) +func (z *bytesDecReader) readn1() (v uint8) { + if z.c == uint(len(z.b)) { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + // z.a-- + return } -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(true)) +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + i := z.c + // if i == len(z.b) { + // goto END + // // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // token = z.b[i] + // i++ + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + + // i := z.c +LOOP: + if i < uint(len(z.b)) { + token = z.b[i] + i++ + if accept.isset(token) { + goto LOOP + } + // z.a -= (i - z.c) + z.c = i + return + } + // END: + panic(io.EOF) + // // z.a = 0 + // z.c = blen + // return } -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(false)) +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + return z.readToNoInput(accept) } -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(8)) +func (z *bytesDecReader) readToNoInput(accept *bitset256) (out []byte) { + i := z.c + if i == uint(len(z.b)) { + panic(io.EOF) + } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + // out = z.b[z.c:] + // z.a, z.c = 0, blen + // return + + // i := z.c + // LOOP: + // if i < blen { + // if accept.isset(z.b[i]) { + // i++ + // goto LOOP + // } + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // out = z.b[z.c:] + // // z.a, z.c = 0, blen + // z.a = 0 + // z.c = blen + // return + + // c := i +LOOP: + if i < uint(len(z.b)) { + if accept.isset(z.b[i]) { + i++ + goto LOOP + } + } + + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return // z.b[c:i] + // z.c, i = i, z.c + // return z.b[i:z.c] } -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(64)) +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + return z.readUntilNoInput(stop) } -func (f *decFnInfo) kUint(rv reflect.Value) { - 
rv.SetUint(f.dd.decodeUint(uintBitsize)) +func (z *bytesDecReader) readUntilNoInput(stop byte) (out []byte) { + i := z.c + // if i == len(z.b) { + // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if z.b[i] == stop { + // i++ + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } +LOOP: + if i < uint(len(z.b)) { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return + } + i++ + goto LOOP + } + // z.a = 0 + // z.c = blen + panic(io.EOF) } -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(32)) +func (z *bytesDecReader) track() { + z.t = z.c } -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(16)) +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] } -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) +// ---------------------------------------- + +// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { +// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) // } -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - if !rv.IsNil() { - f.d.decodeValue(rv.Elem()) - return +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) +} + +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +} + +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(d) +} + +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) } +} + +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(jsonUnmarshaler) + // bs := d.d.DecodeBytes(d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { + d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +// var kIntfCtr uint64 + +func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { // nil interface: - // use some hieristics to set the nil interface to an - // appropriate value based on the first byte read (byte descriptor bd) - v, vt, decodeFurther := f.dd.decodeNaked() - if vt == valueTypeNil { + // use some hieristics to decode it appropriately + // based on the detected next value in the stream. + n := d.naked() + d.d.DecodeNaked() + if n.v == valueTypeNil { return } - // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) - // if non-nil value in stream. - if num := f.ti.rt.NumMethod(); num > 0 { - decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", - f.ti.rt, num) + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
+ if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return } - var rvn reflect.Value - var useRvn bool - switch vt { + // var useRvn bool + switch n.v { case valueTypeMap: - if f.d.h.MapType == nil { - var m2 map[interface{}]interface{} - v = &m2 + // if json, default to a map type with string keys + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else if mtid == mapStrIntfTypId { // for json performance + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() } else { - rvn = reflect.New(f.d.h.MapType).Elem() - useRvn = true + if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil, true) + } } case valueTypeArray: - if f.d.h.SliceType == nil { - var m2 []interface{} - v = &m2 + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } } else { - rvn = reflect.New(f.d.h.SliceType).Elem() - useRvn = true + if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil, true) + } } case valueTypeExt: - re := v.(*RawExt) - var bfn func(reflect.Value, []byte) error - rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + d.decode(&v) + } + bfn := d.h.getExtForTag(tag) if bfn == nil { - rvn = reflect.ValueOf(*re) - } else if fnerr := bfn(rvn, re.Data); fnerr != nil { - panic(fnerr) + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() } - rv.Set(rvn) - return + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) } - if decodeFurther { - if useRvn { - f.d.decodeValue(rvn) - } else if v != nil { - // this v is a pointer, so we need to dereference it when done - f.d.decode(v) - rvn = reflect.ValueOf(v).Elem() - useRvn = true + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. 
+ var rvn reflect.Value + if rv.IsNil() || d.h.InterfaceReset { + // check if mapping to a type: if so, initialize it and move on + rvn = d.h.intf2impl(f.ti.rtid) + if rvn.IsValid() { + rv.Set(rvn) + } else { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rv.Set(rvn) + } else if d.h.InterfaceReset { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return } + } else { + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() } - if useRvn { - rv.Set(rvn) - } else if v != nil { - rv.Set(reflect.ValueOf(v)) + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true) + rv.Set(rvn2) +} + +func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + + if keyType == valueTypeString { + rvkencname = dd.DecodeStringAsBytes() + } else if keyType == valueTypeInt { + rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) + } else if keyType == valueTypeUint { + rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) + } else if keyType == valueTypeFloat { + rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) + } else { + rvkencname = dd.DecodeStringAsBytes() } + return rvkencname } -func (f *decFnInfo) kStruct(rv reflect.Value) { +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { fti := f.ti - if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { - containerLen := f.dd.readMapLen() + dd := d.d + elemsep := d.esep + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + var mf MissingFielder + if fti.mf { + mf = rv2i(rv).(MissingFielder) + } else if fti.mfp { + mf = rv2i(rv.Addr()).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() if containerLen == 0 { + dd.ReadMapEnd() return } - tisfi := fti.sfi - for j := 0; j < containerLen; j++ { - // var rvkencname string - // ddecode(&rvkencname) - f.dd.initReadNext() - rvkencname := f.dd.decodeString() - // rvksi := ti.getForEncName(rvkencname) + d.depthIncr() + tisfi := fti.sfiSort + hasLen := containerLen >= 0 + + var rvkencname []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) + if elemsep { + dd.ReadMapElemValue() + } if k := fti.indexForEncName(rvkencname); k > -1 { - sfik := tisfi[k] - if sfik.i != -1 { - f.d.decodeValue(rv.Field(int(sfik.i))) + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) } else { - f.d.decEmbeddedField(rv, sfik.is) + d.decodeValue(sfn.field(si), nil, true) } - // f.d.decodeValue(ti.field(k, rv)) - } else { - if f.d.h.ErrorIfNoField { - decErr("No matching struct field found when decoding stream map with key: %v", - rvkencname) - } else { - var nilintf0 interface{} - 
f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } else if mf != nil { + // store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode + name2 := rvkencname + rvkencname = make([]byte, len(rvkencname)) + copy(rvkencname, name2) + + var f interface{} + // xdebugf("kStruct: mf != nil: before decode: rvkencname: %s", rvkencname) + d.decode(&f) + // xdebugf("kStruct: mf != nil: after decode: rvkencname: %s", rvkencname) + if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField { + d.errorf("no matching struct field found when decoding stream map with key: %s ", + stringView(rvkencname)) } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) } + // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop } - } else if currEncodedType == valueTypeArray { - containerLen := f.dd.readArrayLen() + dd.ReadMapEnd() + d.depthDecr() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() if containerLen == 0 { + dd.ReadArrayEnd() return } - for j, si := range fti.sfip { - if j == containerLen { + d.depthIncr() + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. + hasLen := containerLen >= 0 + var checkbreak bool + for j, si := range fti.sfiSrc { + if hasLen && j == containerLen { break } - if si.i != -1 { - f.d.decodeValue(rv.Field(int(si.i))) + if !hasLen && dd.CheckBreak() { + checkbreak = true + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) } else { - f.d.decEmbeddedField(rv, si.is) + d.decodeValue(sfn.field(si), nil, true) } } - if containerLen > len(fti.sfip) { + if (hasLen && containerLen > len(fti.sfiSrc)) || (!hasLen && !checkbreak) { // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + for j := len(fti.sfiSrc); ; j++ { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") } } + dd.ReadArrayEnd() + d.depthDecr() } else { - decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", - currEncodedType) + d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) + return } } -func (f *decFnInfo) kSlice(rv reflect.Value) { +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { // A slice can be set from a map or array in stream. - currEncodedType := f.dd.currentEncodedType() + // This way, the order can be kept (as order is lost with map). 
+ ti := f.ti + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { + d.errorf("receive-only channel cannot be decoded") + } + dd := d.d + rtelem0 := ti.elem + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else if len(rvbs) > 0 && len(bs2) > 0 { + copy(rvbs, bs2) + } + } + } + return + } - switch currEncodedType { - case valueTypeBytes, valueTypeString: - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { - rv.SetBytes(bs2) + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } } - return } + slh.End() + return } - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case intfSliceTypId: - f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) - return - case uint64SliceTypId: - f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) - return - case int64SliceTypId: - f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) - return - case strSliceTypId: - f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) - return + d.depthIncr() + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rvCanset = rv.CanSet() + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rvCanset { + rv.SetLen(rvlen) + } + } else if rvCanset { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } else { + d.errorf("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. 
rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rvCanset { + rv.SetLen(rvlen) + } + // else { + // rv = rv.Slice(0, rvlen) + // rvChanged = true + // d.errorf("cannot decode into non-settable slice") + // } } } - containerLen, containerLenS := decContLens(f.dd, currEncodedType) + // consider creating new element once, and just decoding into it. + var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else if f.seq == seqTypeSlice { + rvlen = decDefSliceCap + } else { + rvlen = decDefChanCap + } + if rvCanset { + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else { // chan + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } + } else { + d.errorf("cannot decode into non-settable slice") + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs + var rvcap2 int + var rvErrmsg2 string + rv9, rvcap2, rvChanged, rvErrmsg2 = + expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) + if rvErrmsg2 != "" { + d.errorf(rvErrmsg2) + } + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + if decodeAsNil { + continue + } + } - // an array can never return a nil slice. so no need to check f.array here. 
+ if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else if rvCanset { + rv = rv.Slice(0, j) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + rvlen = j + } else if j == 0 && rv.IsNil() { + if rvCanset { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + } + } + slh.End() + + if rvChanged { // infers rvCanset=true, so it can be reset + rv0.Set(rv) + } + + d.depthDecr() +} +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.esep + ti := f.ti if rv.IsNil() { - rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) + rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) + rv.Set(makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + d.depthIncr() + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := vtype.Kind() + + var keyFn, valFn *codecFn + var ktypeLo, vtypeLo reflect.Type + + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + + for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + + var mapGet, mapSet bool + rvvImmut := isImmutableKind(vtypeKind) + if !d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !d.h.InterfaceReset { + mapGet = true + } + } else if !rvvImmut { + mapGet = true + } + } + + var rvk, rvkp, rvv, rvz reflect.Value + rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen > 0 + var kstrbs []byte + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if rvkMut || !rvkp.IsValid() { + rvkp = reflect.New(ktype) + rvk = rvkp.Elem() + } + if elemsep { + dd.ReadMapElemKey() + } + // if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block + // // Previously, if a nil key, we just ignored the mapped value and continued. + // // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} + // // to be an empty map. + // // Instead, we treat a nil key as the zero value of the type. + // rvk.Set(reflect.Zero(ktype)) + // } else if ktypeIsString { + if ktypeIsString { + kstrbs = dd.DecodeStringAsBytes() + rvk.SetString(stringView(kstrbs)) + // NOTE: if doing an insert, you MUST use a real string (not stringview) + } else { + if keyFn == nil { + keyFn = d.h.fn(ktypeLo, true, true) + } + d.decodeValue(rvk, keyFn, true) + } + // special case if a byte array. + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() { + if rvk2.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk2.Bytes())) + } else { + rvk = rvk2 + } + } + } + + if elemsep { + dd.ReadMapElemValue() + } + + // Brittle, but OK per TryDecodeAsNil() contract. + // i.e. 
TryDecodeAsNil never shares slices with other decDriver procedures + if dd.TryDecodeAsNil() { + if ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if d.h.DeleteOnNilMapValue { + rv.SetMapIndex(rvk, reflect.Value{}) + } else { + rv.SetMapIndex(rvk, reflect.Zero(vtype)) + } + continue + } + + mapSet = true // set to false if u do a get, and its a non-nil pointer + if mapGet { + // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. + rvv = rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } else if vtypeKind == reflect.Ptr { + if rvv.IsNil() { + rvv = reflect.New(vtype).Elem() + } else { + mapSet = false + } + } else if vtypeKind == reflect.Interface { + // not addressable, and thus not settable. + // e MUST create a settable/addressable variant + rvv2 := reflect.New(rvv.Type()).Elem() + if !rvv.IsNil() { + rvv2.Set(rvv) + } + rvv = rvv2 + } + // else it is ~mutable, and we can just decode into it directly + } else if rvvImmut { + if !rvz.IsValid() { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } else { + rvv = reflect.New(vtype).Elem() + } + + // We MUST be done with the stringview of the key, before decoding the value + // so that we don't bastardize the reused byte array. + if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if valFn == nil { + valFn = d.h.fn(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, true) + // d.decodeValueFn(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() + + d.depthDecr() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. + +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + + // primitives below + u uint64 + i int64 + f float64 + l []byte + s string + + // ---- cpu cache line boundary? + t time.Time + b bool + + // state + v valueType + _ [6]bool // padding + + // ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above + // + // _ [3]uint64 // padding +} + +// func (n *decNaked) init() { +// n.ru = reflect.ValueOf(&n.u).Elem() +// n.ri = reflect.ValueOf(&n.i).Elem() +// n.rf = reflect.ValueOf(&n.f).Elem() +// n.rl = reflect.ValueOf(&n.l).Elem() +// n.rs = reflect.ValueOf(&n.s).Elem() +// n.rt = reflect.ValueOf(&n.t).Elem() +// n.rb = reflect.ValueOf(&n.b).Elem() +// // n.rr[] = reflect.ValueOf(&n.) +// } + +// type decNakedPooler struct { +// n *decNaked +// nsp *sync.Pool +// } + +// // naked must be called before each call to .DecodeNaked, as they will use it. 
+// func (d *decNakedPooler) naked() *decNaked { +// if d.n == nil { +// // consider one of: +// // - get from sync.Pool (if GC is frequent, there's no value here) +// // - new alloc (safest. only init'ed if it a naked decode will be done) +// // - field in Decoder (makes the Decoder struct very big) +// // To support using a decoder where a DecodeNaked is not needed, +// // we prefer #1 or #2. +// // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool +// // d.n.init() +// var v interface{} +// d.nsp, v = pool.decNaked() +// d.n = v.(*decNaked) +// } +// return d.n +// } + +// func (d *decNakedPooler) end() { +// if d.n != nil { +// // if n != nil, then nsp != nil (they are always set together) +// d.nsp.Put(d.n) +// d.n, d.nsp = nil, nil +// } +// } + +// type rtid2rv struct { +// rtid uintptr +// rv reflect.Value +// } + +// -------------- + +type decReaderSwitch struct { + rb bytesDecReader + // ---- cpu cache line boundary? + ri *ioDecReader + bi *bufioDecReader + + mtr, str bool // whether maptype or slicetype are known types + + be bool // is binary encoding + js bool // is json handle + jsms bool // is json handle, and MapKeyAsString + esep bool // has elem separators + + // typ entryType + bytes bool // is bytes reader + bufio bool // is this a bufioDecReader? +} + +// numread, track and stopTrack are always inlined, as they just check int fields, etc. + +/* +func (z *decReaderSwitch) numread() int { + switch z.typ { + case entryTypeBytes: + return z.rb.numread() + case entryTypeIo: + return z.ri.numread() + default: + return z.bi.numread() + } +} +func (z *decReaderSwitch) track() { + switch z.typ { + case entryTypeBytes: + z.rb.track() + case entryTypeIo: + z.ri.track() + default: + z.bi.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.stopTrack() + case entryTypeIo: + return z.ri.stopTrack() + default: + return z.bi.stopTrack() + } +} + +func (z *decReaderSwitch) unreadn1() { + switch z.typ { + case entryTypeBytes: + z.rb.unreadn1() + case entryTypeIo: + z.ri.unreadn1() + default: + z.bi.unreadn1() + } +} +func (z *decReaderSwitch) readx(n int) []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.readx(n) + case entryTypeIo: + return z.ri.readx(n) + default: + return z.bi.readx(n) + } +} +func (z *decReaderSwitch) readb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.rb.readb(s) + case entryTypeIo: + z.ri.readb(s) + default: + z.bi.readb(s) + } +} +func (z *decReaderSwitch) readn1() uint8 { + switch z.typ { + case entryTypeBytes: + return z.rb.readn1() + case entryTypeIo: + return z.ri.readn1() + default: + return z.bi.readn1() + } +} +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.skip(accept) + case entryTypeIo: + return z.ri.skip(accept) + default: + return z.bi.skip(accept) + } +} +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readTo(in, accept) + case entryTypeIo: + return z.ri.readTo(in, accept) + default: + return z.bi.readTo(in, accept) + } +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readUntil(in, stop) + case entryTypeIo: + return z.ri.readUntil(in, stop) + default: + return z.bi.readUntil(in, stop) + } +} + +*/ + +// the if/else-if/else block is expensive to inline. 
+// Each node of this construct costs a lot and dominates the budget. +// Best to only do an if fast-path else block (so fast-path is inlined). +// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go +// +// In decReaderSwitch methods below, we delegate all IO functions into their own methods. +// This allows for the inlining of the common path when z.bytes=true. +// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs). + +func (z *decReaderSwitch) numread() uint { + if z.bytes { + return z.rb.numread() + } else if z.bufio { + return z.bi.numread() + } else { + return z.ri.numread() + } +} +func (z *decReaderSwitch) track() { + if z.bytes { + z.rb.track() + } else if z.bufio { + z.bi.track() + } else { + z.ri.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + if z.bytes { + return z.rb.stopTrack() + } else if z.bufio { + return z.bi.stopTrack() + } else { + return z.ri.stopTrack() + } +} + +// func (z *decReaderSwitch) unreadn1() { +// if z.bytes { +// z.rb.unreadn1() +// } else { +// z.unreadn1IO() +// } +// } +// func (z *decReaderSwitch) unreadn1IO() { +// if z.bufio { +// z.bi.unreadn1() +// } else { +// z.ri.unreadn1() +// } +// } + +func (z *decReaderSwitch) unreadn1() { + if z.bytes { + z.rb.unreadn1() + } else if z.bufio { + z.bi.unreadn1() + } else { + z.ri.unreadn1() // not inlined + } +} + +func (z *decReaderSwitch) readx(n uint) []byte { + if z.bytes { + return z.rb.readx(n) + } + return z.readxIO(n) +} +func (z *decReaderSwitch) readxIO(n uint) []byte { + if z.bufio { + return z.bi.readx(n) + } + return z.ri.readx(n) +} + +func (z *decReaderSwitch) readb(s []byte) { + if z.bytes { + z.rb.readb(s) + } else { + z.readbIO(s) + } +} + +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readbIO(s []byte) { + if z.bufio { + z.bi.readb(s) + } else { + z.ri.readb(s) + } +} + +func (z *decReaderSwitch) readn1() uint8 { + if z.bytes { + return z.rb.readn1() + } + return z.readn1IO() +} +func (z *decReaderSwitch) readn1IO() uint8 { + if z.bufio { + return z.bi.readn1() } + return z.ri.readn1() +} - if containerLen == 0 { - return +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + if z.bytes { + return z.rb.skip(accept) } - - if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { - if f.array { // !rv.CanSet() - decErr(msgDecCannotExpandArr, rvcap, containerLenS) - } - rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) - if rvlen > 0 { - reflect.Copy(rvn, rv) - } - rv.Set(rvn) - } else if containerLenS > rvlen { - rv.SetLen(containerLenS) + return z.skipIO(accept) +} +func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) { + if z.bufio { + return z.bi.skip(accept) } + return z.ri.skip(accept) +} - for j := 0; j < containerLenS; j++ { - f.d.decodeValue(rv.Index(j)) +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + if z.bytes { + return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept) } + return z.readToIO(in, accept) } -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readToIO(in []byte, accept *bitset256) (out []byte) { + if z.bufio { + return z.bi.readTo(in, accept) + } + return z.ri.readTo(in, accept) +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + if z.bytes { + return 
z.rb.readUntilNoInput(stop) + } + return z.readUntilIO(in, stop) } -func (f *decFnInfo) kMap(rv reflect.Value) { - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case mapStrIntfTypId: - f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) - return - case mapIntfIntfTypId: - f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) - return - case mapInt64IntfTypId: - f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) - return - case mapUint64IntfTypId: - f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) - return - } +func (z *decReaderSwitch) readUntilIO(in []byte, stop byte) (out []byte) { + if z.bufio { + return z.bi.readUntil(in, stop) } + return z.ri.readUntil(in, stop) +} - containerLen := f.dd.readMapLen() +// Decoder reads and decodes an object from an input stream in a supported format. +// +// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used +// concurrently in multiple goroutines. +// +// However, as Decoder could be allocation heavy to initialize, a Reset method is provided +// so its state can be reused to decode new input streams repeatedly. +// This is the idiomatic way to use. +type Decoder struct { + panicHdl + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). - if rv.IsNil() { - rv.Set(reflect.MakeMap(f.ti.rt)) - } + d decDriver - if containerLen == 0 { - return - } + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. + r *decReaderSwitch - ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - for j := 0; j < containerLen; j++ { - rvk := reflect.New(ktype).Elem() - f.d.decodeValue(rvk) + // bi *bufioDecReader + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr - // special case if a byte array. - // if ktype == intfTyp { - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(string(rvk.Bytes())) - } - } - rvv := rv.MapIndex(rvk) - if !rvv.IsValid() || !rvv.CanSet() { - rvv = reflect.New(vtype).Elem() - } + hh Handle + h *BasicHandle - f.d.decodeValue(rvv) - rv.SetMapIndex(rvk, rvv) - } -} + // ---- cpu cache line boundary? + decReaderSwitch -// ---------------------------------------- + // ---- cpu cache line boundary? + n decNaked -type decFn struct { - i *decFnInfo - f func(*decFnInfo, reflect.Value) -} + // cr containerStateRecv + err error -// A Decoder reads and decodes an object from an input stream in the codec format. -type Decoder struct { - r decReader - d decDriver - h *BasicHandle - f map[uintptr]decFn - x []uintptr - s []decFn + depth int16 + maxdepth int16 + + _ [4]uint8 // padding + + is map[string]string // used for interning strings + + // ---- cpu cache line boundary? + b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers + + // padding - false sharing help // modify 232 if Decoder struct changes. + // _ [cacheLineSize - 232%cacheLineSize]byte } // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. // -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Reader, bytes.Buffer). +// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle +// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). 
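As an aside for readers of this vendored file (this snippet is not part of the upstream sources), a minimal standalone sketch of the constructor guidance above: it assumes the vendored import path github.com/hashicorp/go-msgpack/codec and hand-written msgpack input, where the bytes 0x81 0xa1 0x61 0x01 are the msgpack encoding of the map {"a": 1}.

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	// A Handle is created once and can then be shared by many decoders.
	var mh codec.MsgpackHandle

	// 0x81: fixmap with one entry, 0xa1 0x61: fixstr "a", 0x01: fixint 1.
	in := []byte{0x81, 0xa1, 'a', 0x01}

	// NewDecoder takes an io.Reader; a bytes.Reader keeps the input
	// memory-buffered, as the comment above recommends.
	dec := codec.NewDecoder(bytes.NewReader(in), &mh)

	var v map[string]interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:1]
}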
func NewDecoder(r io.Reader, h Handle) *Decoder { - z := ioDecReader{ - r: r, - } - z.br, _ = r.(io.ByteReader) - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} + d := newDecoder(h) + d.Reset(r) + return d } // NewDecoderBytes returns a Decoder which efficiently decodes directly // from a byte slice with zero copying. func NewDecoderBytes(in []byte, h Handle) *Decoder { - z := bytesDecReader{ - b: in, - a: len(in), + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +// var defaultDecNaked decNaked + +func newDecoder(h Handle) *Decoder { + d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized} + d.bytes = true + if useFinalizers { + runtime.SetFinalizer(d, (*Decoder).finalize) + // xdebugf(">>>> new(Decoder) with finalizer") + } + d.r = &d.decReaderSwitch + d.hh = h + d.be = h.isBinary() + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + var jh *JsonHandle + jh, d.js = h.(*JsonHandle) + if d.js { + d.jsms = jh.MapKeyAsString + } + d.esep = d.hh.hasElemSeparators() + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + // d.cr, _ = d.d.(containerStateRecv) + return d +} + +func (d *Decoder) resetCommon() { + // d.r = &d.decReaderSwitch + d.d.reset() + d.err = nil + d.depth = 0 + d.maxdepth = d.h.MaxDepth + if d.maxdepth <= 0 { + d.maxdepth = decDefMaxDepth + } + // reset all things which were cached from the Handle, but could change + d.mtid, d.stid = 0, 0 + d.mtr, d.str = false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + d.mtr = fastpathAV.index(d.mtid) != -1 + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + d.str = fastpathAV.index(d.stid) != -1 + } +} + +// Reset the Decoder with a new Reader to decode from, +// clearing all state from last run(s). +func (d *Decoder) Reset(r io.Reader) { + if r == nil { + return + } + d.bytes = false + // d.typ = entryTypeUnset + if d.h.ReaderBufferSize > 0 { + if d.bi == nil { + d.bi = new(bufioDecReader) + } + d.bi.reset(r, d.h.ReaderBufferSize) + // d.r = d.bi + // d.typ = entryTypeBufio + d.bufio = true + } else { + // d.ri.x = &d.b + // d.s = d.sa[:0] + if d.ri == nil { + d.ri = new(ioDecReader) + } + d.ri.reset(r) + // d.r = d.ri + // d.typ = entryTypeIo + d.bufio = false + } + d.resetCommon() +} + +// ResetBytes resets the Decoder with a new []byte to decode from, +// clearing all state from last run(s). +func (d *Decoder) ResetBytes(in []byte) { + if in == nil { + return } - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} + d.bytes = true + d.bufio = false + // d.typ = entryTypeBytes + d.rb.reset(in) + // d.r = &d.rb + d.resetCommon() +} + +func (d *Decoder) naked() *decNaked { + return &d.n } // Decode decodes the stream from reader and stores the result in the @@ -609,7 +2470,8 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder { // - Else decode it based on its reflect.Kind // // There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. +// Decode will typically use the stream contents to UPDATE the container i.e. the values +// in these containers will not be zero'ed before decoding. // - A map can be decoded from a stream map, by updating matching keys. // - A slice can be decoded from a stream array, // by updating the first n elements, where n is length of the stream. 
@@ -619,430 +2481,629 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder { // - A struct can be decoded from a stream array, // by updating fields as they occur in the struct (by index). // -// When decoding a stream map or array with length of 0 into a nil map or slice, +// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update +// in place by default). However, the consequence of this is that values in slices or maps +// which are not zero'ed before hand, will have part of the prior values in place after decode +// if the stream doesn't contain an update for those parts. +// +// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset +// decode options available on every handle. +// +// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice, // we reset the destination map or slice to a zero-length value. // // However, when decoding a stream nil, we reset the destination container // to its "zero" value (e.g. nil for slice/map, etc). // +// Note: we allow nil values in the stream anywhere except for map keys. +// A nil value in the encoded stream where a map key is expected is treated as an error. func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) + // tried to use closure, as runtime optimizes defer with no params. + // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). + // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 + // defer func() { d.deferred(&err) }() + // { x, y := d, &err; defer func() { x.deferred(y) }() } + if d.err != nil { + return d.err + } + if recoverPanicToErr { + defer func() { + if x := recover(); x != nil { + panicValToErr(d, x, &d.err) + err = d.err + } + }() + } + + // defer d.deferred(&err) + d.mustDecode(v) return } -func (d *Decoder) decode(iv interface{}) { - d.d.initReadNext() +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. +func (d *Decoder) MustDecode(v interface{}) { + if d.err != nil { + panic(d.err) + } + d.mustDecode(v) +} - switch v := iv.(type) { - case nil: - decErr("Cannot decode into nil.") +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. +func (d *Decoder) mustDecode(v interface{}) { + // TODO: Top-level: ensure that v is a pointer and not nil. + if d.d.TryDecodeAsNil() { + setZero(v) + return + } + if d.bi == nil { + d.decode(v) + return + } - case reflect.Value: - d.chkPtrValue(v) - d.decodeValue(v.Elem()) + d.bi.calls++ + d.decode(v) + // xprintf.(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) + d.bi.calls-- + if !d.h.ExplicitRelease && d.bi.calls == 0 { + d.bi.release() + } +} + +// func (d *Decoder) deferred(err1 *error) { +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(d, x, err1) +// panicValToErr(d, x, &d.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (d *Decoder) finalize() { + // xdebugf("finalizing Decoder") + d.Release() +} + +// Release releases shared (pooled) resources. +// +// It is important to call Release() when done with a Decoder, so those resources +// are released instantly for use by subsequently created Decoders. +// +// By default, Release() is automatically called unless the option ExplicitRelease is set. 
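To make the in-place container update described in the Decode documentation above concrete, here is a small hypothetical sketch (again not part of the upstream file, reusing the hand-written msgpack bytes for {"a": 1} from the earlier snippet): a key that already exists in the destination map but is absent from the stream survives the decode, because map values are not zeroed beforehand unless MapValueReset is set.

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle
	in := []byte{0x81, 0xa1, 'a', 0x01} // msgpack encoding of {"a": 1}

	// Pre-populated destination: "a" is overwritten from the stream,
	// while "z" is not mentioned in the stream and is left in place.
	v := map[string]int{"a": 0, "z": 9}
	if err := codec.NewDecoderBytes(in, &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:1 z:9]
}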
+func (d *Decoder) Release() { + if d.bi != nil { + d.bi.release() + } + // d.decNakedPooler.end() +} + +// // this is not a smart swallow, as it allocates objects and does unnecessary work. +// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.esep + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + d.depthDecr() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + d.depthDecr() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + var v2 interface{} + d.decode(&v2) + } + } +} +func setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { case *string: - *v = d.d.decodeString() + *v = "" case *bool: - *v = d.d.decodeBool() + *v = false case *int: - *v = int(d.d.decodeInt(intBitsize)) + *v = 0 case *int8: - *v = int8(d.d.decodeInt(8)) + *v = 0 case *int16: - *v = int16(d.d.decodeInt(16)) + *v = 0 case *int32: - *v = int32(d.d.decodeInt(32)) + *v = 0 case *int64: - *v = d.d.decodeInt(64) + *v = 0 case *uint: - *v = uint(d.d.decodeUint(uintBitsize)) + *v = 0 case *uint8: - *v = uint8(d.d.decodeUint(8)) + *v = 0 case *uint16: - *v = uint16(d.d.decodeUint(16)) + *v = 0 case *uint32: - *v = uint32(d.d.decodeUint(32)) + *v = 0 case *uint64: - *v = d.d.decodeUint(64) + *v = 0 case *float32: - *v = float32(d.d.decodeFloat(true)) + *v = 0 case *float64: - *v = d.d.decodeFloat(false) - case *[]byte: - *v, _ = d.d.decodeBytes(*v) - - case *[]interface{}: - d.decSliceIntf(v, valueTypeInvalid, false) - case *[]uint64: - d.decSliceUint64(v, valueTypeInvalid, false) - case *[]int64: - d.decSliceInt64(v, valueTypeInvalid, false) - case *[]string: - d.decSliceStr(v, valueTypeInvalid, false) - case *map[string]interface{}: - d.decMapStrIntf(v) - case *map[interface{}]interface{}: - d.decMapIntfIntf(v) - case *map[uint64]interface{}: - d.decMapUint64Intf(v) - case *map[int64]interface{}: - d.decMapInt64Intf(v) - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem()) - + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? 
default: - rv := reflect.ValueOf(iv) - d.chkPtrValue(rv) - d.decodeValue(rv.Elem()) + if !fastpathDecodeSetZeroTypeSwitch(iv) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } } } -func (d *Decoder) decodeValue(rv reflect.Value) { - d.d.initReadNext() +func (d *Decoder) decode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. - if d.d.tryDecodeAsNil() { - // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) - if rv.Kind() == reflect.Ptr { - if !rv.IsNil() { - rv.Set(reflect.Zero(rv.Type())) - } - return - } - // for rv.Kind() == reflect.Ptr { - // rv = rv.Elem() - // } - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } + if iv == nil { + d.errorstr(errstrCannotDecodeIntoNil) return } - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) + switch v := iv.(type) { + // case nil: + // case Selfer: + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, true) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *float32: + f64 := d.d.DecodeFloat64() + if chkOvf.Float32(f64) { + d.errorf("float32 overflow: %v", f64) } - rv = rv.Elem() - } + *v = float32(f64) + case *float64: + *v = d.d.DecodeFloat64() + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + case []uint8: + b := d.d.DecodeBytes(v, false) + if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { + copy(v, b) + } + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times + default: + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + } else if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false) + // d.decodeValueFallback(v) + } + } +} - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var fn decFn - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i, v := range d.x { - if v == rtid { - fn, ok = d.s[i], true +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
+ var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { break } } } - if !ok { - // debugf("\tCreating new dec fn for type: %v\n", rt) - fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} - fn.i = &fi - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. - if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.isBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*decFnInfo).ext - } else if supportBinaryMarshal && fi.ti.unm { - fn.f = (*decFnInfo).binaryMarshal + + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addrD { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else if !fn.i.addrF { + fn.fd(d, &fn.i, rv) } else { - switch rk := rt.Kind(); rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Slice: - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.array = true - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } + d.errorf("cannot decode into a non-pointer value") } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]decFn, 16) - } - d.f[rtid] = fn - } else { - d.s = append(d.s, fn) - d.x = append(d.x, rtid) + } else { + fn.fd(d, &fn.i, rv) + } + // return rv +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
+ if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + return } } + d.swallow() +} - fn.f(fn.i, rv) +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} +func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { + switch rv.Kind() { + case reflect.Array: + return rv, rv.CanAddr() + case reflect.Ptr: + if !rv.IsNil() { + return rv.Elem(), true + } + case reflect.Slice, reflect.Chan, reflect.Map: + if !rv.IsNil() { + return rv, true + } + } return } -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { +func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { + // decode can take any reflect.Value that is a inherently addressable i.e. + // - array + // - non-nil chan (we will SEND to it) + // - non-nil slice (we will set its elements) + // - non-nil map (we will put into it) + // - non-nil pointer (we can "update" it) + rv2, canDecode := isDecodeable(rv) + if canDecode { return } if !rv.IsValid() { - decErr("Cannot decode into a zero (ie invalid) reflect.Value") + d.errorstr(errstrCannotDecodeIntoNil) + return } if !rv.CanInterface() { - decErr("Cannot decode into a value without an interface: %v", rv) + d.errorf("cannot decode into a value without an interface: %v", rv) + return } - rvi := rv.Interface() - decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", - rv.Kind(), rvi, rvi) + rvi := rv2i(rv) + rvk := rv.Kind() + d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi) + return } -func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { - // d.decodeValue(rv.FieldByIndex(index)) - // nil pointers may be here; so reproduce FieldByIndex logic + enhancements - for _, j := range index { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - // If a pointer, it must be a pointer to struct (based on typeInfo contract) - rv = rv.Elem() - } - rv = rv.Field(j) +func (d *Decoder) depthIncr() { + d.depth++ + if d.depth >= d.maxdepth { + panic(errMaxDepthExceeded) + } +} + +func (d *Decoder) depthDecr() { + d.depth-- +} + +// Possibly get an interned version of a string +// +// This should mostly be used for map keys, where the key type is string. +// This is because keys of a map/struct are typically reused across many objects. +func (d *Decoder) string(v []byte) (s string) { + if d.is == nil { + return string(v) // don't return stringView, as we need a real string here. + } + s, ok := d.is[string(v)] // no allocation here, per go implementation + if !ok { + s = string(v) // new allocation here + d.is[s] = s } - d.decodeValue(rv) + return s +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() (bs []byte) { + d.d.uncacheRead() + d.r.track() + d.swallow() + bs = d.r.stopTrack() + return +} + +func (d *Decoder) rawBytes() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. 
+ bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +func (d *Decoder) wrapErr(v interface{}, err *error) { + *err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: int(d.r.numread())} +} + +// NumBytesRead returns the number of bytes read +func (d *Decoder) NumBytesRead() int { + return int(d.r.numread()) } // -------------------------------------------------- -// short circuit functions for common maps and slices - -func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]interface{}, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]interface{}, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - d.decode(&s[j]) - } - *v = s -} - -func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]int64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]int64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeInt(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]uint64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]uint64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeUint(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]string, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]string, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeString() - } - *v = s -} - -func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[interface{}]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - var mk interface{} - d.decode(&mk) - // special case if a byte array. - if bv, bok := mk.([]byte); bok { - mk = string(bv) - } - mv := m[mk] - d.decode(&mv) - m[mk] = mv +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
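The note above that a slice can be set from a map in the stream can be illustrated with a hedged standalone sketch (not part of the vendored sources): based on decSliceHelperStart below, which reads a stream map of n entries as 2n alternating elements, decoding a msgpack map into a []interface{} is expected to produce the keys and values interleaved.

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle
	in := []byte{0x81, 0xa1, 'a', 0x01} // msgpack encoding of {"a": 1}

	// A stream map decoded into a slice is flattened to key, value, key, value, ...
	var v []interface{}
	if err := codec.NewDecoderBytes(in, &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // expected: [a 1]
}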
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + switch ctyp { + case valueTypeArray: + x.array = true + clen = dd.ReadArrayStart() + case valueTypeMap: + clen = dd.ReadMapStart() * 2 + default: + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) } + // x.ct = ctyp + x.d = d + return } -func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[int64]interface{}, containerLen) - *v = m +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeInt(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() } } -func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[uint64]interface{}, containerLen) - *v = m +func decByteSlice(r *decReaderSwitch, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeUint(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } } + return } -func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[string]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeString() - mv := m[mk] - d.decode(&mv) - m[mk] = mv +// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { +// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { +// return r.readx(clen) +// } +// return decByteSlice(r, clen, maxInitLen, bs) +// } + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } } + return in } -// ---------------------------------------- +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} -func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { - if currEncodedType == valueTypeInvalid { - currEncodedType = dd.currentEncodedType() +func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool, err string) { + l1 := slen + num // new slice length + if l1 < slen { + err = errmsgExpandSliceOverflow + return } - switch currEncodedType { - case valueTypeArray: - containerLen = dd.readArrayLen() - containerLenS = containerLen - case valueTypeMap: - containerLen = dd.readMapLen() - containerLenS = containerLen * 2 - default: - decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", - currEncodedType) + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else if canChange { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } else { + err = errmsgExpandSliceCannotChange + return + } + return + } + if !canChange { + err = errmsgExpandSliceCannotChange + return } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) return } -func decErr(format string, params ...interface{}) { - doPanic(msgTagDec, format, params...) +func decReadFull(r io.Reader, bs []byte) (n uint, err error) { + var nn int + for n < uint(len(bs)) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += uint(nn) + } + } + // xdebugf("decReadFull: len(bs): %v, n: %v, err: %v", len(bs), n, err) + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} + +func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) { + if rawToString { + n.v = valueTypeString + n.s = string(dr.DecodeBytes(d.b[:], true)) + } else { + n.v = valueTypeBytes + n.l = dr.DecodeBytes(nil, false) + } } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/doc.go new file mode 100644 index 00000000000..5c5df9cfda1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/doc.go @@ -0,0 +1,245 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +Package codec provides a +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library +for binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +This package will carefully use 'package unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 4 +go releases e.g. current go release is go 1.12, so we support unsafe use only from +go 1.9+ . This is because supporting unsafe requires knowledge of implementation details. + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . 
+ +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Excellent code coverage ( > 90% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - In-place updates during decode, with option to zero value in maps and slices prior to decode + - Coerce types where appropriate + e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Support IsZero() bool to determine if a value is a zero value. + Analogous to time.Time.IsZero() bool. + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Mapping a non-interface type to an interface, so we can decode appropriately + into any interface type with a correctly configured non-interface value. + - Encode a struct as an array, and decode struct from an array in the data stream + - Option to encode struct keys as numbers (instead of strings) + (to support structured streams with fields encoded as numeric codes) + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. 
Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symmetry +(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +Running Benchmarks + + cd bench + go test -bench . -benchmem -benchtime 1s + +Please see http://github.com/ugorji/go-codec-bench . + +Managing Binary Size + +This package could add up to 10MB to the size of your binaries. + +This is because we include some a auto-generated file: `fast-path.generated.go` +to help with performance when encoding/decoding slices and maps of +built in numeric, boolean, string and interface{} types. + +You can override this by building (or running tests and benchmarks) +with the tag: `notfastpath`. + + go install -tags notfastpath + go build -tags notfastpath + go test -tags notfastpath + +Be aware that, at least in our representative microbenchmarks for cbor (for example), +we see up to 33% increase in decoding and 50% increase in encoding speeds. +YMMV. + +Caveats + +Struct fields matching the following are ignored during encoding and decoding + - struct tag value set to - + - func, complex numbers, unsafe pointers + - unexported and not embedded + - unexported and embedded and not struct kind + - unexported and embedded pointers (from go1.10) + +Every other field in a struct will be encoded/decoded. + +Embedded fields are encoded as if they exist in the top-level struct, +with some caveats. See Encode documentation. + +*/ +package codec + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go index 4914be0c748..9d8d1164a4b 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go @@ -1,626 +1,1267 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( + "encoding" + "errors" + "fmt" "io" "reflect" + "runtime" + "sort" + "strconv" + "time" ) -const ( - // Some tagging information for error messages. - msgTagEnc = "codec.encoder" - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 - // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. 
- // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe +// defEncByteBufSize is the default size of []byte used +// for bufio buffer or []byte (when nil passed) +const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota +var errEncoderNotInitialized = errors.New("Encoder not initialized") - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag +/* - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracting writing to a byte array or to an io.Writer. +// encWriter abstracts writing to a byte array or to an io.Writer. +// +// +// Deprecated: Use encWriterSwitch instead. type encWriter interface { - writeUint16(uint16) - writeUint32(uint32) - writeUint64(uint64) writeb([]byte) writestr(string) writen1(byte) writen2(byte, byte) - atEndOfEncode() + end() } +*/ + // encDriver abstracts the actual codec (binc vs msgpack, etc) type encDriver interface { - isBuiltinType(rt uintptr) bool - encodeBuiltin(rt uintptr, v interface{}) - encodeNil() - encodeInt(i int64) - encodeUint(i uint64) - encodeBool(b bool) - encodeFloat32(f float32) - encodeFloat64(f float64) - encodeExtPreamble(xtag byte, length int) - encodeArrayPreamble(length int) - encodeMapPreamble(length int) - encodeString(c charEncoding, v string) - encodeSymbol(v string) - encodeStringBytes(c charEncoding, v []byte) - //TODO + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + // Deprecated: use EncodeStringEnc instead + EncodeString(c charEncoding, v string) + // Deprecated: use EncodeStringBytesRaw instead + EncodeStringBytes(c charEncoding, v []byte) + EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW + // EncodeSymbol(v string) + EncodeStringBytesRaw(v []byte) + EncodeTime(time.Time) //encBignum(f *big.Int) //encStringRunes(c charEncoding, v []rune) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + + reset() + atEndOfEncode() } -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) +type encDriverAsis interface { + EncodeAsis(v []byte) } -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) +type encodeError struct { + codecError } +func (e encodeError) Error() string { + return fmt.Sprintf("%s encode error: %v", e.name, e.err) +} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayElem() {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapElemKey() {} +func (encDriverNoopContainerWriter) WriteMapElemValue() {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +type encDriverTrackContainerWriter struct { 
+ c containerState +} + +func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart } +func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem } +func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd } +func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart } +func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey } +func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue } +func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd } +func (e *encDriverTrackContainerWriter) atEndOfEncode() {} + +// type ioEncWriterWriter interface { +// WriteByte(c byte) error +// WriteString(s string) (n int, err error) +// Write(p []byte) (n int, err error) +// } + +// EncodeOptions captures configuration options during encode. type EncodeOptions struct { - // Encode a struct as an array, and not as a map. + // WriterBufferSize is the size of the buffer used when writing. + // + // if > 0, we use a smart buffer internally for performance purposes. + WriterBufferSize int + + // ChanRecvTimeout is the timeout used when selecting from a chan. + // + // Configuring this controls how we receive from a chan during the encoding process. + // - If ==0, we only consume the elements currently available in the chan. + // - if <0, we consume until the chan is closed. + // - If >0, we consume until this timeout. + ChanRecvTimeout time.Duration + + // StructToArray specifies to encode a struct as an array, and not as a map StructToArray bool - // AsSymbols defines what should be encoded as symbols. + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. // - // Encoding as symbols can reduce the encoded size significantly. + // This only affects maps, as the iteration order for maps is random. // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. + // The implementation MAY use the natural sort order for the map keys if possible: // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} + // - If there is a natural sort order (ie for number, bool, string or []byte keys), + // then the map keys are first sorted in natural order and then written + // with corresponding map values to the strema. + // - If there is no natural sort order, then the map keys will first be + // encoded into []byte, and then sorted, + // before writing the sorted keys and the corresponding map values to the stream. + // + Canonical bool -// --------------------------------------------- + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter -} + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. 
+ // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - _, err = o.w.Write([]byte{c}) - return -} + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - return o.w.Write([]byte(s)) + // StringToRaw controls how strings are encoded. + // + // As a go string is just an (immutable) sequence of bytes, + // it can be encoded either as raw bytes or as a UTF string. + // + // By default, strings are encoded as UTF-8. + // but can be treated as []byte during an encode. + // + // Note that things which we know (by definition) to be UTF-8 + // are ALWAYS encoded as UTF-8 strings. + // These include encoding.TextMarshaler, time.Format calls, struct field names, etc. + StringToRaw bool + + // // AsSymbols defines what should be encoded as symbols. + // // + // // Encoding as symbols can reduce the encoded size significantly. + // // + // // However, during decoding, each string to be encoded as a symbol must + // // be checked to see if it has been seen before. Consequently, encoding time + // // will increase if using symbols, because string comparisons has a clear cost. + // // + // // Sample values: + // // AsSymbolNone + // // AsSymbolAll + // // AsSymbolMapStringKeys + // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + // AsSymbols AsSymbolFlag } -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} +// --------------------------------------------- + +/* -// ---------------------------------------- +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} // ioEncWriter implements encWriter and can write to an io.Writer implementation type ioEncWriter struct { - w ioEncWriterWriter - x [8]byte // temp byte array re-used internally for efficiency + w io.Writer + ww io.Writer + bw io.ByteWriter + sw ioEncStringWriter + fw ioFlusher + b [8]byte } -func (z *ioEncWriter) writeUint16(v uint16) { - bigen.PutUint16(z.x[:2], v) - z.writeb(z.x[:2]) +func (z *ioEncWriter) reset(w io.Writer) { + z.w = w + var ok bool + if z.bw, ok = w.(io.ByteWriter); !ok { + z.bw = z + } + if z.sw, ok = w.(ioEncStringWriter); !ok { + z.sw = z + } + z.fw, _ = w.(ioFlusher) + z.ww = w } -func (z *ioEncWriter) writeUint32(v uint32) { - bigen.PutUint32(z.x[:4], v) - z.writeb(z.x[:4]) +func (z *ioEncWriter) WriteByte(b byte) (err error) { + z.b[0] = b + _, err = z.w.Write(z.b[:1]) + return } -func (z *ioEncWriter) writeUint64(v uint64) { - bigen.PutUint64(z.x[:8], v) - z.writeb(z.x[:8]) +func (z *ioEncWriter) WriteString(s string) (n int, err error) { + return z.w.Write(bytesView(s)) } func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { + if _, err := z.ww.Write(bs); err != nil { panic(err) } - if n != len(bs) { - encErr("write: Incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(bs), n) - } } func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { + if _, err := z.sw.WriteString(s); err != nil { panic(err) } - if n != len(s) { - encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n) - } } func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { + if err := z.bw.WriteByte(b); err != nil { panic(err) } } -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) +func (z *ioEncWriter) writen2(b1, b2 byte) { + var err error + if err = z.bw.WriteByte(b1); err == nil { + if err = z.bw.WriteByte(b2); err == nil { + return + } + } + panic(err) } -func (z *ioEncWriter) atEndOfEncode() {} +// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { +// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 +// if _, err := z.ww.Write(z.b[:5]); err != nil { +// panic(err) +// } +// } -// ---------------------------------------- +//go:noinline - so *encWriterSwitch.XXX has the bytesEncAppender.XXX inlined +func (z *ioEncWriter) end() { + if z.fw != nil { + if err := z.fw.Flush(); err != nil { + panic(err) + } + } +} -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode +*/ + +// --------------------------------------------- + +// bufioEncWriter +type bufioEncWriter struct { + buf []byte + w io.Writer + n int + sz int // buf size + + // Extensions can call Encode() within a current Encode() call. + // We need to know when the top level Encode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. 
+ + _ [6]uint8 // padding + + bytesBufPooler + + _ [1]uint64 // padding + // a int + // b [4]byte + // err } -func (z *bytesEncWriter) writeUint16(v uint16) { - c := z.grow(2) - z.b[c] = byte(v >> 8) - z.b[c+1] = byte(v) +func (z *bufioEncWriter) reset(w io.Writer, bufsize int) { + z.w = w + z.n = 0 + z.calls = 0 + if bufsize <= 0 { + bufsize = defEncByteBufSize + } + z.sz = bufsize + if cap(z.buf) >= bufsize { + z.buf = z.buf[:cap(z.buf)] + } else { + z.buf = z.bytesBufPooler.get(bufsize) + // z.buf = make([]byte, bufsize) + } } -func (z *bytesEncWriter) writeUint32(v uint32) { - c := z.grow(4) - z.b[c] = byte(v >> 24) - z.b[c+1] = byte(v >> 16) - z.b[c+2] = byte(v >> 8) - z.b[c+3] = byte(v) +func (z *bufioEncWriter) release() { + z.buf = nil + z.bytesBufPooler.end() } -func (z *bytesEncWriter) writeUint64(v uint64) { - c := z.grow(8) - z.b[c] = byte(v >> 56) - z.b[c+1] = byte(v >> 48) - z.b[c+2] = byte(v >> 40) - z.b[c+3] = byte(v >> 32) - z.b[c+4] = byte(v >> 24) - z.b[c+5] = byte(v >> 16) - z.b[c+6] = byte(v >> 8) - z.b[c+7] = byte(v) +//go:noinline - flush only called intermittently +func (z *bufioEncWriter) flushErr() (err error) { + n, err := z.w.Write(z.buf[:z.n]) + z.n -= n + if z.n > 0 && err == nil { + err = io.ErrShortWrite + } + if n > 0 && z.n > 0 { + copy(z.buf, z.buf[n:z.n+n]) + } + return err } -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return +func (z *bufioEncWriter) flush() { + if err := z.flushErr(); err != nil { + panic(err) } - c := z.grow(len(s)) - copy(z.b[c:], s) } -func (z *bytesEncWriter) writestr(s string) { - c := z.grow(len(s)) - copy(z.b[c:], s) +func (z *bufioEncWriter) writeb(s []byte) { +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) } -func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 +func (z *bufioEncWriter) writestr(s string) { + // z.writeb(bytesView(s)) // inlined below +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) } -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 +func (z *bufioEncWriter) writen1(b1 byte) { + if 1 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n] = b1 + z.n++ } -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] +func (z *bufioEncWriter) writen2(b1, b2 byte) { + if 2 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n+1] = b2 + z.buf[z.n] = b1 + z.n += 2 } -func (z *bytesEncWriter) grow(n int) (oldcursor int) { - oldcursor = z.c - z.c = oldcursor + n - if z.c > cap(z.b) { - // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). - // However, it was too expensive, causing too many iterations of copy. - // Using bytes.Buffer model was much better (2*cap + n) - bs := make([]byte, 2*cap(z.b)+n) - copy(bs, z.b[:oldcursor]) - z.b = bs - } else if z.c > len(z.b) { - z.b = z.b[:cap(z.b)] +func (z *bufioEncWriter) endErr() (err error) { + if z.n > 0 { + err = z.flushErr() } return } // --------------------------------------------- -type encFnInfo struct { - ti *typeInfo - e *Encoder - ee encDriver - xfFn func(reflect.Value) ([]byte, error) - xfTag byte +// bytesEncAppender implements encWriter and can write to an byte slice. 
+type bytesEncAppender struct { + b []byte + out *[]byte } -func (f *encFnInfo) builtin(rv reflect.Value) { - f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +func (z *bytesEncAppender) writeb(s []byte) { + z.b = append(z.b, s...) } - -func (f *encFnInfo) rawExt(rv reflect.Value) { - f.e.encRawExt(rv.Interface().(RawExt)) +func (z *bytesEncAppender) writestr(s string) { + z.b = append(z.b, s...) } - -func (f *encFnInfo) ext(rv reflect.Value) { - bs, fnerr := f.xfFn(rv) - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - return - } - if f.e.hh.writeExt() { - f.ee.encodeExtPreamble(f.xfTag, len(bs)) - f.e.w.writeb(bs) - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } - +func (z *bytesEncAppender) writen1(b1 byte) { + z.b = append(z.b, b1) } - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryMarshaler - if f.ti.mIndir == 0 { - bm = rv.Interface().(binaryMarshaler) - } else if f.ti.mIndir == -1 { - bm = rv.Addr().Interface().(binaryMarshaler) - } else { - for j, k := int8(0), f.ti.mIndir; j < k; j++ { - if rv.IsNil() { - f.ee.encodeNil() - return - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryMarshaler) - } - // debugf(">>>> binaryMarshaler: %T", rv.Interface()) - bs, fnerr := bm.MarshalBinary() - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } +func (z *bytesEncAppender) writen2(b1, b2 byte) { + z.b = append(z.b, b1, b2) } +func (z *bytesEncAppender) endErr() error { + *(z.out) = z.b + return nil +} +func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + z.b = in[:0] + z.out = out +} + +// --------------------------------------------- -func (f *encFnInfo) kBool(rv reflect.Value) { - f.ee.encodeBool(rv.Bool()) +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) } -func (f *encFnInfo) kString(rv reflect.Value) { - f.ee.encodeString(c_UTF8, rv.String()) +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) } -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.ee.encodeFloat64(rv.Float()) +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) } -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.ee.encodeFloat32(float32(rv.Float())) +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) } -func (f *encFnInfo) kInt(rv reflect.Value) { - f.ee.encodeInt(rv.Int()) +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) } -func (f *encFnInfo) kUint(rv reflect.Value) { - f.ee.encodeUint(rv.Uint()) +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) } -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.ee.encodeNil() +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) } -func (f *encFnInfo) kErr(rv reflect.Value) { - encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() } -func (f *encFnInfo) kSlice(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} - if 
shortCircuitReflectToFastPath { - switch f.ti.rtid { - case intfSliceTypId: - f.e.encSliceIntf(rv.Interface().([]interface{})) - return - case strSliceTypId: - f.e.encSliceStr(rv.Interface().([]string)) - return - case uint64SliceTypId: - f.e.encSliceUint64(rv.Interface().([]uint64)) +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() return - case int64SliceTypId: - f.e.encSliceInt64(rv.Interface().([]int64)) + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytesRaw(rv.Bytes()) return } } + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { + e.errorf("send-only channel cannot be encoded") + } + elemsep := e.esep + rtelem := ti.elem + rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 + var l int + // if a slice, array or chan of bytes, treat specially + if rtelemIsByte { + switch f.seq { + case seqTypeSlice: + ee.EncodeStringBytesRaw(rv.Bytes()) + case seqTypeArray: + l = rv.Len() + if rv.CanAddr() { + ee.EncodeStringBytesRaw(rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytesRaw(bs) + } + case seqTypeChan: + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. + + if rv.IsNil() { + ee.EncodeNil() + break + } + bs := e.b[:0] + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + + L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: // only consume available + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: // consume until timeout + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + // close(tt.C) + break L1 + } + } + default: // consume until close + for b := range ch { + bs = append(bs, b) + } + } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + ee.EncodeStringBytesRaw(bs) + } return } - l := rv.Len() - if f.ti.mbs { + // if chan, consume chan into a slice, and work off that slice. 
+ if f.seq == seqTypeChan { + rvcs := reflect.Zero(reflect.SliceOf(rtelem)) + timeout := e.h.ChanRecvTimeout + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected + } + + l = rv.Len() + if ti.mbs { if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + e.errorf("mapBySlice requires even slice length, but got %v", l) + return } - f.ee.encodeMapPreamble(l / 2) + ee.WriteMapStart(l / 2) } else { - f.ee.encodeArrayPreamble(l) + ee.WriteArrayStart(l) } - if l == 0 { - return + + if l > 0 { + var fn *codecFn + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + if rtelem.Kind() != reflect.Interface { + fn = e.h.fn(rtelem, true, true) + } + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + e.encodeValue(rv.Index(j), fn, true) + } } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() } } -func (f *encFnInfo) kArray(rv reflect.Value) { - // We cannot share kSlice method, because the array may be non-addressable. - // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". - // So we have to duplicate the functionality here. 
- // f.e.encodeValue(rv.Slice(0, rv.Len())) - // f.kSlice(rv.Slice(0, rv.Len())) +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + tisfi := fti.sfiSrc + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfiSort + } - l := rv.Len() - // Handle an array of bytes specially (in line with what is done for slices) - if f.ti.rt.Elem().Kind() == reflect.Uint8 { - if l == 0 { - f.ee.encodeStringBytes(c_RAW, nil) - return + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + e.encodeValue(sfn.field(si), nil, true) + } } - var bs []byte - if rv.CanAddr() { - bs = rv.Slice(0, l).Bytes() + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } } else { - bs = make([]byte, l) - for i := 0; i < l; i++ { - bs[i] = byte(rv.Index(i).Uint()) + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) } } - f.ee.encodeStringBytes(c_RAW, bs) - return + ee.WriteArrayEnd() } +} - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } +func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { + encStructFieldKey(encName, e.e, e.w, keyType, encNameAsciiAlphaNum, e.js) } -func (f *encFnInfo) kStruct(rv reflect.Value) { +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { fti := f.ti - newlen := len(fti.sfi) - rvals := make([]reflect.Value, newlen) - var encnames []string - e := f.e - tisfi := fti.sfip + elemsep := e.esep + tisfi := fti.sfiSrc + var newlen int toMap := !(fti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if f.ti.mf { + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + toMap = true + newlen += len(mf) + } else if f.ti.mfp { + if rv.CanAddr() { + mf = rv2i(rv.Addr()).(MissingFielder).CodecMissingFields() + } else { + // make a new addressable value of same one, and use it + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + mf = rv2i(rv2).(MissingFielder).CodecMissingFields() + } + toMap = true + newlen += len(mf) + } // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) if toMap { - tisfi = fti.sfi - encnames = make([]string, newlen) + tisfi = fti.sfiSort } + newlen += len(tisfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. + // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. 
+ // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) + var spool sfiRvPooler + var fkvs = spool.get(newlen) + + var kv sfiRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} newlen = 0 for _, si := range tisfi { - if si.i != -1 { - rvals[newlen] = rv.Field(int(si.i)) - } else { - rvals[newlen] = rv.FieldByIndex(si.is) - } + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) if toMap { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { continue } - encnames[newlen] = si.encName + kv.v = si // si.encName } else { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - rvals[newlen] = reflect.Value{} //encode as nil + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, + reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } } } + fkvs[newlen] = kv newlen++ } + fkvs = fkvs[:newlen] + + var mflen int + for k, v := range mf { + if k == "" { + delete(mf, k) + continue + } + if fti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur, recur) { + delete(mf, k) + continue + } + mflen++ + } - // debugf(">>>> kStruct: newlen: %v", newlen) + var j int if toMap { - ee := f.ee //don't dereference everytime - ee.encodeMapPreamble(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - if asSymbols { - ee.encodeSymbol(encnames[j]) - } else { - ee.encodeString(c_UTF8, encnames[j]) + ee.WriteMapStart(newlen + mflen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) } - e.encodeValue(rvals[j]) + } else { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + e.encodeValue(kv.r, nil, true) + } + } + // now, add the others + for k, v := range mf { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, false, k) + ee.WriteMapElemValue() + e.encode(v) } + ee.WriteMapEnd() } else { - f.ee.encodeArrayPreamble(newlen) - for j := 0; j < newlen; j++ { - e.encodeValue(rvals[j]) + ee.WriteArrayStart(newlen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + e.encodeValue(fkvs[j].r, nil, true) + } } + ee.WriteArrayEnd() } -} -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.ee.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - f.e.encodeValue(rv.Elem()) + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. 
+ spool.end() } -func (f *encFnInfo) kMap(rv reflect.Value) { +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e if rv.IsNil() { - f.ee.encodeNil() + ee.EncodeNil() return } - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case mapIntfIntfTypId: - f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) - return - case mapStrIntfTypId: - f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) - return - case mapStrStrTypId: - f.e.encMapStrStr(rv.Interface().(map[string]string)) - return - case mapInt64IntfTypId: - f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) - return - case mapUint64IntfTypId: - f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) - return - } - } - l := rv.Len() - f.ee.encodeMapPreamble(l) + ee.WriteMapStart(l) if l == 0 { + ee.WriteMapEnd() return } - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - keyTypeIsString := f.ti.rt.Key() == stringTyp - var asSymbols bool - if keyTypeIsString { - asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + // var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.key + rtkey := rtkey0 + rtval0 := ti.elem + rtval := rtval0 + // rtkeyid := rt2id(rtkey0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.h.fn(rtval, true, true) } mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn) + ee.WriteMapEnd() + return + } + + var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid + if !keyTypeIsString { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + // rtkeyid = rt2id(rtkey) + keyFn = e.h.fn(rtkey, true, true) + } + } + // for j, lmks := 0, len(mks); j < lmks; j++ { for j := range mks { + if e.esep { + ee.WriteMapElemKey() + } if keyTypeIsString { - if asSymbols { - f.ee.encodeSymbol(mks[j].String()) + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mks[j].String())) } else { - f.ee.encodeString(c_UTF8, mks[j].String()) + ee.EncodeStringEnc(cUTF8, mks[j].String()) } } else { - f.e.encodeValue(mks[j]) + e.encodeValue(mks[j], keyFn, true) + } + if e.esep { + ee.WriteMapElemValue() } - f.e.encodeValue(rv.MapIndex(mks[j])) + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + } + ee.WriteMapEnd() +} +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { + ee := e.e + elemsep := e.esep + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. 
+ + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mksv[i].v)) + } else { + ee.EncodeStringEnc(cUTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Struct: + if rv.Type() == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + sort.Sort(timeRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeTime(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + break + } + fallthrough + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } } -// -------------------------------------------------- +// // 
-------------------------------------------------- + +type encWriterSwitch struct { + // wi *ioEncWriter + wb bytesEncAppender + wf *bufioEncWriter + // typ entryType + bytes bool // encoding to []byte + esep bool // whether it has elem separators + isas bool // whether e.as != nil + js bool // is json encoder? + be bool // is binary encoder? + _ [2]byte // padding + // _ [2]uint64 // padding + // _ uint64 // padding +} -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i *encFnInfo - f func(*encFnInfo, reflect.Value) +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + if z.bytes { + z.wb.writestr(s) + } else { + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + if z.bytes { + z.wb.writen1(b1) + } else { + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + if z.bytes { + z.wb.writen2(b1, b2) + } else { + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) endErr() error { + if z.bytes { + return z.wb.endErr() + } + return z.wf.endErr() } -// -------------------------------------------------- +func (z *encWriterSwitch) end() { + if err := z.endErr(); err != nil { + panic(err) + } +} -// An Encoder writes an object to an output stream in the codec format. +/* + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writeb(s) + case entryTypeIo: + z.wi.writeb(s) + default: + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + switch z.typ { + case entryTypeBytes: + z.wb.writestr(s) + case entryTypeIo: + z.wi.writestr(s) + default: + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen1(b1) + case entryTypeIo: + z.wi.writen1(b1) + default: + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen2(b1, b2) + case entryTypeIo: + z.wi.writen2(b1, b2) + default: + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) end() { + switch z.typ { + case entryTypeBytes: + z.wb.end() + case entryTypeIo: + z.wi.end() + default: + z.wf.end() + } +} + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wi.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + if z.bytes { + z.wb.writestr(s) + } else { + z.wi.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + if z.bytes { + z.wb.writen1(b1) + } else { + z.wi.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + if z.bytes { + z.wb.writen2(b1, b2) + } else { + z.wi.writen2(b1, b2) + } +} +func (z *encWriterSwitch) end() { + if z.bytes { + z.wb.end() + } else { + z.wi.end() + } +} + +*/ + +// Encoder writes an object to an output stream in a supported format. +// +// Encoder is NOT safe for concurrent use i.e. a Encoder cannot be used +// concurrently in multiple goroutines. +// +// However, as Encoder could be allocation heavy to initialize, a Reset method is provided +// so its state can be reused to decode new input streams repeatedly. +// This is the idiomatic way to use. 
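Because an Encoder is not safe for concurrent use but is designed to be reused via Reset, a typical single-goroutine pattern looks like the sketch below (a hedged example, not part of this diff; the handle and values are arbitrary):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/hashicorp/go-msgpack/codec"
    )

    func main() {
        var mh codec.MsgpackHandle
        var buf bytes.Buffer
        enc := codec.NewEncoder(&buf, &mh)
        defer enc.Release() // release pooled resources once the Encoder is no longer needed

        for _, v := range []interface{}{map[string]int{"a": 1}, []string{"x", "y"}} {
            buf.Reset()
            enc.Reset(&buf) // reuse the Encoder's cached state with a fresh output stream
            if err := enc.Encode(v); err != nil {
                panic(err)
            }
            fmt.Printf("encoded %d bytes\n", buf.Len())
        }
    }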
type Encoder struct { - w encWriter - e encDriver + panicHdl + // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder + e encDriver + + // NOTE: Encoder shouldn't call it's write methods, + // as the handler MAY need to do some coordination. + w *encWriterSwitch + + // bw *bufio.Writer + as encDriverAsis + + err error + h *BasicHandle hh Handle - f map[uintptr]encFn - x []uintptr - s []encFn + // ---- cpu cache line boundary? + 3 + encWriterSwitch + + ci set + + b [(5 * 8)]byte // for encoding chan or (non-addressable) [N]byte + + // ---- writable fields during execution --- *try* to keep in sep cache line + + // ---- cpu cache line boundary? + // b [scratchByteArrayLen]byte + // _ [cacheLineSize - scratchByteArrayLen]byte // padding + // b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes } // NewEncoder returns an Encoder for encoding into an io.Writer. // -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). +// For efficiency, Users are encouraged to configure WriterBufferSize on the handle +// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer). func NewEncoder(w io.Writer, h Handle) *Encoder { - ww, ok := w.(ioEncWriterWriter) - if !ok { - sww := simpleIoEncWriterWriter{w: w} - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - ww = &sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - z := ioEncWriter{ - w: ww, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} + e := newEncoder(h) + e.Reset(w) + return e } // NewEncoderBytes returns an encoder for encoding directly and efficiently @@ -629,373 +1270,541 @@ func NewEncoder(w io.Writer, h Handle) *Encoder { // It will potentially replace the output byte slice pointed to. // After encoding, the out parameter contains the encoded contents. func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - in := *out + e := newEncoder(h) + e.ResetBytes(out) + return e +} + +func newEncoder(h Handle) *Encoder { + e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized} + e.bytes = true + if useFinalizers { + runtime.SetFinalizer(e, (*Encoder).finalize) + // xdebugf(">>>> new(Encoder) with finalizer") + } + e.w = &e.encWriterSwitch + e.hh = h + e.esep = h.hasElemSeparators() + + return e +} + +func (e *Encoder) resetCommon() { + // e.w = &e.encWriterSwitch + if e.e == nil || e.hh.recreateEncDriver(e.e) { + e.e = e.hh.newEncDriver(e) + e.as, e.isas = e.e.(encDriverAsis) + // e.cr, _ = e.e.(containerStateRecv) + } + e.be = e.hh.isBinary() + _, e.js = e.hh.(*JsonHandle) + e.e.reset() + e.err = nil +} + +// Reset resets the Encoder with a new output stream. +// +// This accommodates using the state of the Encoder, +// where it has "cached" information about sub-engines. 
+func (e *Encoder) Reset(w io.Writer) { + if w == nil { + return + } + // var ok bool + e.bytes = false + if e.wf == nil { + e.wf = new(bufioEncWriter) + } + // e.typ = entryTypeUnset + // if e.h.WriterBufferSize > 0 { + // // bw := bufio.NewWriterSize(w, e.h.WriterBufferSize) + // // e.wi.bw = bw + // // e.wi.sw = bw + // // e.wi.fw = bw + // // e.wi.ww = bw + // if e.wf == nil { + // e.wf = new(bufioEncWriter) + // } + // e.wf.reset(w, e.h.WriterBufferSize) + // e.typ = entryTypeBufio + // } else { + // if e.wi == nil { + // e.wi = new(ioEncWriter) + // } + // e.wi.reset(w) + // e.typ = entryTypeIo + // } + e.wf.reset(w, e.h.WriterBufferSize) + // e.typ = entryTypeBufio + + // e.w = e.wi + e.resetCommon() +} + +// ResetBytes resets the Encoder with a new destination output []byte. +func (e *Encoder) ResetBytes(out *[]byte) { + if out == nil { + return + } + var in []byte = *out if in == nil { in = make([]byte, defEncByteBufSize) } - z := bytesEncWriter{ - b: in, - out: out, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} + e.bytes = true + // e.typ = entryTypeBytes + e.wb.reset(in, out) + // e.w = &e.wb + e.resetCommon() } -// Encode writes an object into a stream in the codec format. +// Encode writes an object into a stream. // -// Encoding can be configured via the "codec" struct tag for the fields. +// Encoding can be configured via the struct tag for the fields. +// The key (in the struct tags) that we look at is configurable. // -// The "codec" key in struct field's tag value is the key name, +// By default, we look up the "codec" key in the struct field's tags, +// and fall bak to the "json" key if "codec" is absent. +// That key in struct field's tag value is the key name, // followed by an optional comma and options. // // To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. +// can create a field called _struct, and set flags on it. The options +// which can be set on _struct are: +// - omitempty: so all fields are omitted if empty +// - toarray: so struct is encoded as an array +// - int: so struct key names are encoded as signed integers (instead of strings) +// - uint: so struct key names are encoded as unsigned integers (instead of strings) +// - float: so struct key names are encoded as floats (instead of strings) +// More details on these below. // // Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's codec tag is "-", OR -// - the field is empty and its codec tag specifies the "omitempty" option. +// - the field's tag is "-", OR +// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. // // When encoding as a map, the first string in the tag (before the comma) // is the map key string to use when encoding. +// ... +// This key is typically encoded as a string. +// However, there are instances where the encoded stream has mapping keys encoded as numbers. +// For example, some cbor streams have keys as integer codes in the stream, but they should map +// to fields in a structured object. Consequently, a struct is the natural representation in code. +// For these, configure the struct to encode/decode the keys as numbers (instead of string). +// This is done with the int,uint or float option on the _struct field (see above). // // However, struct values may encode as arrays. 
This happens when: // - StructToArray Encode option is set, OR -// - the codec tag on the _struct field sets the "toarray" option +// - the tag on the _struct field sets the "toarray" option +// Note that omitempty is ignored when encoding struct values as arrays, +// as an entry must be encoded for each field, to maintain its position. // // Values with types that implement MapBySlice are encoded as stream maps. // // The empty values (for omitempty option) are false, 0, any nil pointer // or interface value, and any array, slice, map, or string of length zero. // -// Anonymous fields are encoded inline if no struct tag is present. -// Else they are encoded as regular fields. +// Anonymous fields are encoded inline except: +// - the struct tag specifies a replacement name (first value) +// - the field is of an interface type // // Examples: // +// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. // type MyStruct struct { // _struct bool `codec:",omitempty"` //set omitempty for every field // Field1 string `codec:"-"` //skip this field // Field2 int `codec:"myName"` //Use key "myName" in encode stream // Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. // Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// io.Reader //use key "Reader". +// MyStruct `codec:"my1" //use key "my1". +// MyStruct //inline it // ... // } // // type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array +// _struct bool `codec:",toarray"` //encode struct as an array +// } +// +// type MyStruct struct { +// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys +// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1) +// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2) // } // // The mode of encoding is based on the type of the value. When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method // - If an extension is registered for it, call that extension function -// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method // - Else encode it based on its reflect.Kind // // Note that struct field names and keys in map[string]XXX will be treated as symbols. // Some formats support symbols (e.g. binc) and will properly encode the string // only once in the stream, and use a tag to refer to it thereafter. func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() + // tried to use closure, as runtime optimizes defer with no params. + // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). + // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 + // defer func() { e.deferred(&err) }() } + // { x, y := e, &err; defer func() { x.deferred(y) }() } + if e.err != nil { + return e.err + } + if recoverPanicToErr { + defer func() { + // if error occurred during encoding, return that error; + // else if error occurred on end'ing (i.e. during flush), return that error. + err = e.w.endErr() + x := recover() + if x == nil { + e.err = err + } else { + panicValToErr(e, x, &e.err) + err = e.err + } + }() + } + + // defer e.deferred(&err) + e.mustEncode(v) return } +// MustEncode is like Encode, but panics if unable to Encode. 
+// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.mustEncode(v) +} + +func (e *Encoder) mustEncode(v interface{}) { + if e.wf == nil { + e.encode(v) + e.e.atEndOfEncode() + e.w.end() + return + } + + if e.wf.buf == nil { + e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz) + } + e.wf.calls++ + + e.encode(v) + + e.wf.calls-- + + if e.wf.calls == 0 { + e.e.atEndOfEncode() + e.w.end() + if !e.h.ExplicitRelease { + e.wf.release() + } + } +} + +// func (e *Encoder) deferred(err1 *error) { +// e.w.end() +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(e, x, err1) +// panicValToErr(e, x, &e.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (e *Encoder) finalize() { + // xdebugf("finalizing Encoder") + e.Release() +} + +// Release releases shared (pooled) resources. +// +// It is important to call Release() when done with an Encoder, so those resources +// are released instantly for use by subsequently created Encoders. +func (e *Encoder) Release() { + if e.wf != nil { + e.wf.release() + } +} + func (e *Encoder) encode(iv interface{}) { - switch v := iv.(type) { - case nil: - e.e.encodeNil() + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + switch v := iv.(type) { + // case nil: + // case Selfer: + case Raw: + e.rawBytes(v) case reflect.Value: - e.encodeValue(v) + e.encodeValue(v, nil, true) case string: - e.e.encodeString(c_UTF8, v) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(v)) + } else { + e.e.EncodeStringEnc(cUTF8, v) + } case bool: - e.e.encodeBool(v) + e.e.EncodeBool(v) case int: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int8: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int16: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int32: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int64: - e.e.encodeInt(v) + e.e.EncodeInt(v) case uint: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint8: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint16: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint32: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint64: - e.e.encodeUint(v) + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) case float32: - e.e.encodeFloat32(v) + e.e.EncodeFloat32(v) case float64: - e.e.encodeFloat64(v) - - case []interface{}: - e.encSliceIntf(v) - case []string: - e.encSliceStr(v) - case []int64: - e.encSliceInt64(v) - case []uint64: - e.encSliceUint64(v) + e.e.EncodeFloat64(v) + case time.Time: + e.e.EncodeTime(v) case []uint8: - e.e.encodeStringBytes(c_RAW, v) - - case map[interface{}]interface{}: - e.encMapIntfIntf(v) - case map[string]interface{}: - e.encMapStrIntf(v) - case map[string]string: - e.encMapStrStr(v) - case map[int64]interface{}: - e.encMapInt64Intf(v) - case map[uint64]interface{}: - e.encMapUint64Intf(v) + e.e.EncodeStringBytesRaw(v) + + case *Raw: + e.rawBytes(*v) case *string: - e.e.encodeString(c_UTF8, *v) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(*v)) + } else { + e.e.EncodeStringEnc(cUTF8, *v) + } case *bool: - e.e.encodeBool(*v) + e.e.EncodeBool(*v) case *int: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int8: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int16: - 
e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int32: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int64: - e.e.encodeInt(*v) + e.e.EncodeInt(*v) case *uint: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint8: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint16: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint32: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint64: - e.e.encodeUint(*v) + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) case *float32: - e.e.encodeFloat32(*v) + e.e.EncodeFloat32(*v) case *float64: - e.e.encodeFloat64(*v) - - case *[]interface{}: - e.encSliceIntf(*v) - case *[]string: - e.encSliceStr(*v) - case *[]int64: - e.encSliceInt64(*v) - case *[]uint64: - e.encSliceUint64(*v) + e.e.EncodeFloat64(*v) + case *time.Time: + e.e.EncodeTime(*v) + case *[]uint8: - e.e.encodeStringBytes(c_RAW, *v) - - case *map[interface{}]interface{}: - e.encMapIntfIntf(*v) - case *map[string]interface{}: - e.encMapStrIntf(*v) - case *map[string]string: - e.encMapStrStr(*v) - case *map[int64]interface{}: - e.encMapInt64Intf(*v) - case *map[uint64]interface{}: - e.encMapUint64Intf(*v) + e.e.EncodeStringBytesRaw(*v) default: - e.encodeValue(reflect.ValueOf(iv)) + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + } else if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } } } -func (e *Encoder) encodeValue(rv reflect.Value) { - for rv.Kind() == reflect.Ptr { +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool +TOP: + switch rv.Kind() { + case reflect.Ptr: if rv.IsNil() { - e.e.encodeNil() + e.e.EncodeNil() return } + rvpValid = true + rvp = rv rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } - var fn encFn - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i, v := range e.x { - if v == rtid { - fn, ok = e.s[i], true - break - } + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
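// (Editorial note, not part of the vendored patch.) When CheckCircularRef is set
// on the handle, the address of each struct value being encoded is captured here
// and recorded in the e.ci set further below; it is removed again once the value
// has been written. If the same address is seen while it is still in the set,
// encoding aborts with the "circular reference found" error.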
+ sptr = rv.UnsafeAddr() + break TOP } - } - if !ok { - // debugf("\tCreating new enc fn for type: %v\n", rt) - fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} - fn.i = &fi - if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.isBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*encFnInfo).ext - } else if supportBinaryMarshal && fi.ti.m { - fn.f = (*encFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Slice: - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fn.f = (*encFnInfo).kArray - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]encFn, 16) - } - e.f[rtid] = fn - } else { - e.s = append(e.s, fn) - e.x = append(e.x, rtid) + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return } - fn.f(fn.i, rv) - -} + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } -func (e *Encoder) encRawExt(re RawExt) { - if re.Data == nil { - e.e.encodeNil() - return + if fn == nil { + rt := rv.Type() + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.h.fn(rt, checkFastpath, true) } - if e.hh.writeExt() { - e.e.encodeExtPreamble(re.Tag, len(re.Data)) - e.w.writeb(re.Data) + if fn.i.addrE { + if rvpValid { + fn.fe(e, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fe(e, &fn.i, rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + fn.fe(e, &fn.i, rv2) + } } else { - e.e.encodeStringBytes(c_RAW, re.Data) + fn.fe(e, &fn.i, rv) + } + if sptr != 0 { + (&e.ci).remove(sptr) } } -// --------------------------------------------- -// short circuit functions for common maps and slices +// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { +// if fnerr != nil { +// panic(fnerr) +// } +// if bs == nil { +// e.e.EncodeNil() +// } else if asis { +// e.asis(bs) +// } else { +// e.e.EncodeStringBytesRaw(bs) +// } +// } -func (e *Encoder) encSliceIntf(v []interface{}) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.encode(v2) +func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) } -} - -func (e *Encoder) encSliceStr(v []string) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeString(c_UTF8, v2) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringEnc(cUTF8, stringView(bs)) } } -func (e *Encoder) encSliceInt64(v []int64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - 
e.e.encodeInt(v2) +func (e *Encoder) marshalAsis(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) } -} - -func (e *Encoder) encSliceUint64(v []uint64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeUint(v2) + if bs == nil { + e.e.EncodeNil() + } else { + e.asis(bs) } } -func (e *Encoder) encMapStrStr(v map[string]string) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.e.encodeString(c_UTF8, v2) +func (e *Encoder) marshalRaw(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringBytesRaw(bs) } } -func (e *Encoder) encMapStrIntf(v map[string]interface{}) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.encode(v2) +func (e *Encoder) asis(v []byte) { + if e.isas { + e.as.EncodeAsis(v) + } else { + e.w.writeb(v) } } -func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeInt(k2) - e.encode(v2) +func (e *Encoder) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) } + e.asis(v) } -func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeUint(uint64(k2)) - e.encode(v2) - } +func (e *Encoder) wrapErr(v interface{}, err *error) { + *err = encodeError{codecError{name: e.hh.Name(), err: v}} } -func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.encode(k2) - e.encode(v2) +func encStructFieldKey(encName string, ee encDriver, w *encWriterSwitch, + keyType valueType, encNameAsciiAlphaNum bool, js bool) { + var m must + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + if keyType == valueTypeString { + if js && encNameAsciiAlphaNum { // keyType == valueTypeString + // w.writen1('"') + // w.writestr(encName) + // w.writen1('"') + // ---- + // w.writestr(`"` + encName + `"`) + // ---- + // do concat myself, so it is faster than the generic string concat + b := make([]byte, len(encName)+2) + copy(b[1:], encName) + b[0] = '"' + b[len(b)-1] = '"' + w.writeb(b) + } else { // keyType == valueTypeString + ee.EncodeStringEnc(cUTF8, encName) + } + } else if keyType == valueTypeInt { + ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64))) } } -// ---------------------------------------- - -func encErr(format string, params ...interface{}) { - doPanic(msgTagEnc, format, params...) 
-} +// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) { +// if stringToRaw { +// ee.EncodeStringBytesRaw(bytesView(s)) +// } else { +// ee.EncodeStringEnc(cUTF8, s) +// } +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go new file mode 100644 index 00000000000..9bc14bd638a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go @@ -0,0 +1,33668 @@ +// +build !notfastpath + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fast-path.go.tmpl - DO NOT EDIT. + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. +// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + // Note: we use goto (instead of for loop) so this can be inlined. 
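// (Editorial sketch, not part of the generated file.) fastpathAV below holds the
// 271 registered fast-path entries sorted by rtid, and index() looks up an entry
// with the classic sort.Search binary search, written with goto so the function
// stays small enough to inline. A plain-loop equivalent of the search below:
//
//	i, j := uint(0), uint(len(x))
//	for i < j {
//		h := i + (j-i)/2
//		if x[h].rtid < rtid {
//			i = h + 1
//		} else {
//			j = h
//		}
//	}
//	if i < uint(len(x)) && x[i].rtid == rtid {
//		return int(i)
//	}
//	return -1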
+ // h, i, j := 0, 0, len(x) + var h, i uint + var j = uint(len(x)) +LOOP: + if i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(x)) && x[i].rtid == rtid { + return int(i) + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } +func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + var i uint = 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + } + + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, 
(*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), 
(*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), 
(*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, 
(*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, 
(*Decoder).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, 
(*Decoder).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) + 
fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), 
(*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, e) + case []string: + fastpathTV.EncSliceStringV(v, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, e) + case []float32: + fastpathTV.EncSliceFloat32V(v, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, e) + case []float64: + fastpathTV.EncSliceFloat64V(v, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, e) + case []uint: + fastpathTV.EncSliceUintV(v, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, e) + case []uint16: + fastpathTV.EncSliceUint16V(v, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, e) + case []uint32: + fastpathTV.EncSliceUint32V(v, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, e) + case []uint64: + fastpathTV.EncSliceUint64V(v, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, e) + case []uintptr: + fastpathTV.EncSliceUintptrV(v, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, e) + case []int: + fastpathTV.EncSliceIntV(v, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, e) + case []int8: + fastpathTV.EncSliceInt8V(v, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, e) + case []int16: + fastpathTV.EncSliceInt16V(v, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, e) + case []int32: + fastpathTV.EncSliceInt32V(v, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, e) + case []int64: + fastpathTV.EncSliceInt64V(v, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, e) + case []bool: + fastpathTV.EncSliceBoolV(v, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, e) + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, e) + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, e) + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, e) + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, e) + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, 
e) + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, e) + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, e) + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, e) + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, e) + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, e) + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, e) + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, e) + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, e) + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, e) + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, e) + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, e) + case map[string]string: + fastpathTV.EncMapStringStringV(v, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, e) + case map[string]uint: + fastpathTV.EncMapStringUintV(v, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, e) + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, e) + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, e) + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, e) + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, e) + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, e) + case map[string]int: + fastpathTV.EncMapStringIntV(v, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, e) + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, e) + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, e) + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, e) + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, e) + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, e) + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, e) + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, e) + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, e) + case *map[float32]interface{}: + 
fastpathTV.EncMapFloat32IntfV(*v, e) + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, e) + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, e) + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, e) + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, e) + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, e) + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, e) + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, e) + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, e) + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, e) + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, e) + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, e) + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, e) + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, e) + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, e) + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, e) + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, e) + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, e) + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, e) + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, e) + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, e) + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, e) + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, e) + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, e) + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, e) + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, e) + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, e) + case map[float64]int32: + 
fastpathTV.EncMapFloat64Int32V(v, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, e) + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, e) + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, e) + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, e) + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, e) + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, e) + case map[uint]string: + fastpathTV.EncMapUintStringV(v, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, e) + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, e) + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, e) + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, e) + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, e) + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, e) + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, e) + case map[uint]int: + fastpathTV.EncMapUintIntV(v, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, e) + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, e) + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, e) + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, e) + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, e) + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, e) + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, e) + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, e) + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, e) + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, e) + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, e) + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, e) + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, e) + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, e) + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, e) + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, e) + 
case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, e) + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, e) + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, e) + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, e) + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, e) + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, e) + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, e) + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, e) + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, e) + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, e) + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, e) + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, e) + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, e) + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, e) + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, e) + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, e) + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, e) + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, e) + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, e) + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, e) + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, e) + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, e) + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, e) + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, e) + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, e) + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, e) + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, e) + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, e) + case map[uint32]uint8: 
+ fastpathTV.EncMapUint32Uint8V(v, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, e) + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, e) + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, e) + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, e) + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, e) + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, e) + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, e) + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, e) + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, e) + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, e) + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, e) + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, e) + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, e) + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, e) + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, e) + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, e) + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, e) + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, e) + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, e) + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, e) + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, e) + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, e) + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, e) + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, e) + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, e) + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, e) + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, e) + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, e) + case 
*map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, e) + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, e) + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, e) + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, e) + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, e) + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, e) + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, e) + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, e) + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, e) + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, e) + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, e) + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, e) + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, e) + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, e) + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, e) + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, e) + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, e) + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, e) + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, e) + case map[int]string: + fastpathTV.EncMapIntStringV(v, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, e) + case map[int]uint: + fastpathTV.EncMapIntUintV(v, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, e) + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, e) + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, e) + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, e) + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, e) + case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, e) + case map[int]int: + fastpathTV.EncMapIntIntV(v, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, e) + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, e) + case map[int]int16: + 
fastpathTV.EncMapIntInt16V(v, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, e) + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, e) + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, e) + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, e) + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, e) + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, e) + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, e) + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, e) + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, e) + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, e) + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, e) + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, e) + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, e) + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, e) + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, e) + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, e) + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, e) + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, e) + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, e) + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, e) + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, e) + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, e) + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, e) + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, e) + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, e) + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, e) + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, e) + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, e) + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, e) + case map[int16]uintptr: + 
fastpathTV.EncMapInt16UintptrV(v, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, e) + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, e) + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, e) + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, e) + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, e) + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, e) + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, e) + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, e) + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, e) + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, e) + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, e) + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, e) + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, e) + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, e) + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, e) + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, e) + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, e) + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, e) + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, e) + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, e) + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, e) + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, e) + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, e) + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, e) + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, e) + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, e) + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, e) + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, e) + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, e) + 
case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, e) + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, e) + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, e) + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, e) + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, e) + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, e) + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, e) + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, e) + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, e) + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, e) + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, e) + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, e) + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, e) + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, e) + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, e) + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, e) + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, e) + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, e) + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, e) + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, e) + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, e) + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, e) + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, e) + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, e) + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, e) + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, e) + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, e) + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, e) + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, e) + + default: + _ = v // workaround 
https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions + +func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) + } else { + fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) + } else { + fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) + } else { + fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeFloat32(v2) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat32(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) + } else { + fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeFloat64(v2) + } + 
ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat64(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) + } else { + fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e) + } else { + fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e) + } +} +func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) + } else { + fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) + } else { + fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) + } else { + fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) + } else { + fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) + } else { + fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) + } else { + fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) + } else { + fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) + } else { + fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) + } else { + fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) + } else { + fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) + } +} 
+func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeBool(v2) + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeBool(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 
{ + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l 
uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[string(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[string(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + 
ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) 
fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + 
if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if 
esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[float32(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[float32(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + 
sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + 
} + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[float64(k2)])) + } else { + 
ee.EncodeStringEnc(cUTF8, v[float64(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep 
{ + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for 
k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + 
ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e 
*Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint8(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint8(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) +} +func (_ fastpathT) 
EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := 
make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + 
sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint16(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint16(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i 
uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + 
i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint32(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint32(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := 
range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint64(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint64(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + 
} + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + 
} else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + 
if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + 
} + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) +} +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uintptr(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uintptr(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + 
int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[bool(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, 
v[bool(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + var changed bool + switch v := iv.(type) { + + case []interface{}: + var v2 []interface{} + v2, changed = fastpathTV.DecSliceIntfV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]interface{}: + var v2 []interface{} + v2, changed = fastpathTV.DecSliceIntfV(*v, true, d) + if changed { + *v = v2 + } + case []string: + var v2 []string + v2, changed = fastpathTV.DecSliceStringV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]string: + var v2 []string + v2, changed = fastpathTV.DecSliceStringV(*v, true, d) + if changed { + *v = v2 + } + case []float32: + var v2 []float32 + v2, changed = fastpathTV.DecSliceFloat32V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]float32: + var v2 []float32 + v2, changed = fastpathTV.DecSliceFloat32V(*v, true, d) + if changed { + *v = v2 + } + case []float64: + var v2 []float64 + v2, changed = fastpathTV.DecSliceFloat64V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]float64: + var v2 []float64 + v2, changed = fastpathTV.DecSliceFloat64V(*v, true, d) + if changed { + *v = v2 + } + case []uint: + var v2 []uint + v2, changed = fastpathTV.DecSliceUintV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint: + var v2 []uint + v2, changed = fastpathTV.DecSliceUintV(*v, true, d) + if changed { + *v = v2 + } + case []uint16: + var v2 []uint16 + v2, changed = fastpathTV.DecSliceUint16V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint16: + var v2 []uint16 + v2, changed = fastpathTV.DecSliceUint16V(*v, true, d) + if changed { + *v = v2 + } + case []uint32: + var v2 []uint32 + v2, changed = fastpathTV.DecSliceUint32V(v, false, d) + if changed && 
len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint32: + var v2 []uint32 + v2, changed = fastpathTV.DecSliceUint32V(*v, true, d) + if changed { + *v = v2 + } + case []uint64: + var v2 []uint64 + v2, changed = fastpathTV.DecSliceUint64V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint64: + var v2 []uint64 + v2, changed = fastpathTV.DecSliceUint64V(*v, true, d) + if changed { + *v = v2 + } + case []uintptr: + var v2 []uintptr + v2, changed = fastpathTV.DecSliceUintptrV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uintptr: + var v2 []uintptr + v2, changed = fastpathTV.DecSliceUintptrV(*v, true, d) + if changed { + *v = v2 + } + case []int: + var v2 []int + v2, changed = fastpathTV.DecSliceIntV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int: + var v2 []int + v2, changed = fastpathTV.DecSliceIntV(*v, true, d) + if changed { + *v = v2 + } + case []int8: + var v2 []int8 + v2, changed = fastpathTV.DecSliceInt8V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int8: + var v2 []int8 + v2, changed = fastpathTV.DecSliceInt8V(*v, true, d) + if changed { + *v = v2 + } + case []int16: + var v2 []int16 + v2, changed = fastpathTV.DecSliceInt16V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int16: + var v2 []int16 + v2, changed = fastpathTV.DecSliceInt16V(*v, true, d) + if changed { + *v = v2 + } + case []int32: + var v2 []int32 + v2, changed = fastpathTV.DecSliceInt32V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int32: + var v2 []int32 + v2, changed = fastpathTV.DecSliceInt32V(*v, true, d) + if changed { + *v = v2 + } + case []int64: + var v2 []int64 + v2, changed = fastpathTV.DecSliceInt64V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int64: + var v2 []int64 + v2, changed = fastpathTV.DecSliceInt64V(*v, true, d) + if changed { + *v = v2 + } + case []bool: + var v2 []bool + v2, changed = fastpathTV.DecSliceBoolV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]bool: + var v2 []bool + v2, changed = fastpathTV.DecSliceBoolV(*v, true, d) + if changed { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, false, d) + case *map[interface{}]interface{}: + var v2 map[interface{}]interface{} + v2, changed = fastpathTV.DecMapIntfIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, false, d) + case *map[interface{}]string: + var v2 map[interface{}]string + v2, changed = fastpathTV.DecMapIntfStringV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, false, d) + case *map[interface{}]uint: + var v2 map[interface{}]uint + v2, changed = fastpathTV.DecMapIntfUintV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, false, d) + case *map[interface{}]uint8: + var v2 map[interface{}]uint8 + v2, changed = 
fastpathTV.DecMapIntfUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, false, d) + case *map[interface{}]uint16: + var v2 map[interface{}]uint16 + v2, changed = fastpathTV.DecMapIntfUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, false, d) + case *map[interface{}]uint32: + var v2 map[interface{}]uint32 + v2, changed = fastpathTV.DecMapIntfUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, false, d) + case *map[interface{}]uint64: + var v2 map[interface{}]uint64 + v2, changed = fastpathTV.DecMapIntfUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, false, d) + case *map[interface{}]uintptr: + var v2 map[interface{}]uintptr + v2, changed = fastpathTV.DecMapIntfUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, false, d) + case *map[interface{}]int: + var v2 map[interface{}]int + v2, changed = fastpathTV.DecMapIntfIntV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, false, d) + case *map[interface{}]int8: + var v2 map[interface{}]int8 + v2, changed = fastpathTV.DecMapIntfInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, false, d) + case *map[interface{}]int16: + var v2 map[interface{}]int16 + v2, changed = fastpathTV.DecMapIntfInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, false, d) + case *map[interface{}]int32: + var v2 map[interface{}]int32 + v2, changed = fastpathTV.DecMapIntfInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, false, d) + case *map[interface{}]int64: + var v2 map[interface{}]int64 + v2, changed = fastpathTV.DecMapIntfInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, false, d) + case *map[interface{}]float32: + var v2 map[interface{}]float32 + v2, changed = fastpathTV.DecMapIntfFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, false, d) + case *map[interface{}]float64: + var v2 map[interface{}]float64 + v2, changed = fastpathTV.DecMapIntfFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, false, d) + case *map[interface{}]bool: + var v2 map[interface{}]bool + v2, changed = fastpathTV.DecMapIntfBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, false, d) + case *map[string]interface{}: + var v2 map[string]interface{} + v2, changed = fastpathTV.DecMapStringIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[string]string: + fastpathTV.DecMapStringStringV(v, false, d) + case *map[string]string: + var v2 map[string]string + v2, changed = fastpathTV.DecMapStringStringV(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint: + fastpathTV.DecMapStringUintV(v, false, d) + case *map[string]uint: + var v2 map[string]uint + v2, changed = fastpathTV.DecMapStringUintV(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, false, d) + case *map[string]uint8: + var v2 map[string]uint8 + v2, changed = 
fastpathTV.DecMapStringUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, false, d) + case *map[string]uint16: + var v2 map[string]uint16 + v2, changed = fastpathTV.DecMapStringUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, false, d) + case *map[string]uint32: + var v2 map[string]uint32 + v2, changed = fastpathTV.DecMapStringUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, false, d) + case *map[string]uint64: + var v2 map[string]uint64 + v2, changed = fastpathTV.DecMapStringUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, false, d) + case *map[string]uintptr: + var v2 map[string]uintptr + v2, changed = fastpathTV.DecMapStringUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[string]int: + fastpathTV.DecMapStringIntV(v, false, d) + case *map[string]int: + var v2 map[string]int + v2, changed = fastpathTV.DecMapStringIntV(*v, true, d) + if changed { + *v = v2 + } + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, false, d) + case *map[string]int8: + var v2 map[string]int8 + v2, changed = fastpathTV.DecMapStringInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, false, d) + case *map[string]int16: + var v2 map[string]int16 + v2, changed = fastpathTV.DecMapStringInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, false, d) + case *map[string]int32: + var v2 map[string]int32 + v2, changed = fastpathTV.DecMapStringInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, false, d) + case *map[string]int64: + var v2 map[string]int64 + v2, changed = fastpathTV.DecMapStringInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, false, d) + case *map[string]float32: + var v2 map[string]float32 + v2, changed = fastpathTV.DecMapStringFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, false, d) + case *map[string]float64: + var v2 map[string]float64 + v2, changed = fastpathTV.DecMapStringFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, false, d) + case *map[string]bool: + var v2 map[string]bool + v2, changed = fastpathTV.DecMapStringBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, false, d) + case *map[float32]interface{}: + var v2 map[float32]interface{} + v2, changed = fastpathTV.DecMapFloat32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, false, d) + case *map[float32]string: + var v2 map[float32]string + v2, changed = fastpathTV.DecMapFloat32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, false, d) + case *map[float32]uint: + var v2 map[float32]uint + v2, changed = fastpathTV.DecMapFloat32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, false, d) + case *map[float32]uint8: + var v2 map[float32]uint8 + v2, changed = fastpathTV.DecMapFloat32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint16: + 
fastpathTV.DecMapFloat32Uint16V(v, false, d) + case *map[float32]uint16: + var v2 map[float32]uint16 + v2, changed = fastpathTV.DecMapFloat32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, false, d) + case *map[float32]uint32: + var v2 map[float32]uint32 + v2, changed = fastpathTV.DecMapFloat32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, false, d) + case *map[float32]uint64: + var v2 map[float32]uint64 + v2, changed = fastpathTV.DecMapFloat32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, false, d) + case *map[float32]uintptr: + var v2 map[float32]uintptr + v2, changed = fastpathTV.DecMapFloat32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, false, d) + case *map[float32]int: + var v2 map[float32]int + v2, changed = fastpathTV.DecMapFloat32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, false, d) + case *map[float32]int8: + var v2 map[float32]int8 + v2, changed = fastpathTV.DecMapFloat32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, false, d) + case *map[float32]int16: + var v2 map[float32]int16 + v2, changed = fastpathTV.DecMapFloat32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, false, d) + case *map[float32]int32: + var v2 map[float32]int32 + v2, changed = fastpathTV.DecMapFloat32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, false, d) + case *map[float32]int64: + var v2 map[float32]int64 + v2, changed = fastpathTV.DecMapFloat32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, false, d) + case *map[float32]float32: + var v2 map[float32]float32 + v2, changed = fastpathTV.DecMapFloat32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, false, d) + case *map[float32]float64: + var v2 map[float32]float64 + v2, changed = fastpathTV.DecMapFloat32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, false, d) + case *map[float32]bool: + var v2 map[float32]bool + v2, changed = fastpathTV.DecMapFloat32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, false, d) + case *map[float64]interface{}: + var v2 map[float64]interface{} + v2, changed = fastpathTV.DecMapFloat64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, false, d) + case *map[float64]string: + var v2 map[float64]string + v2, changed = fastpathTV.DecMapFloat64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, false, d) + case *map[float64]uint: + var v2 map[float64]uint + v2, changed = fastpathTV.DecMapFloat64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, false, d) + case *map[float64]uint8: + var v2 map[float64]uint8 + v2, changed = fastpathTV.DecMapFloat64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, false, d) + case *map[float64]uint16: 
+ var v2 map[float64]uint16 + v2, changed = fastpathTV.DecMapFloat64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, false, d) + case *map[float64]uint32: + var v2 map[float64]uint32 + v2, changed = fastpathTV.DecMapFloat64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, false, d) + case *map[float64]uint64: + var v2 map[float64]uint64 + v2, changed = fastpathTV.DecMapFloat64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, false, d) + case *map[float64]uintptr: + var v2 map[float64]uintptr + v2, changed = fastpathTV.DecMapFloat64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, false, d) + case *map[float64]int: + var v2 map[float64]int + v2, changed = fastpathTV.DecMapFloat64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, false, d) + case *map[float64]int8: + var v2 map[float64]int8 + v2, changed = fastpathTV.DecMapFloat64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, false, d) + case *map[float64]int16: + var v2 map[float64]int16 + v2, changed = fastpathTV.DecMapFloat64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, false, d) + case *map[float64]int32: + var v2 map[float64]int32 + v2, changed = fastpathTV.DecMapFloat64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, false, d) + case *map[float64]int64: + var v2 map[float64]int64 + v2, changed = fastpathTV.DecMapFloat64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, false, d) + case *map[float64]float32: + var v2 map[float64]float32 + v2, changed = fastpathTV.DecMapFloat64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, false, d) + case *map[float64]float64: + var v2 map[float64]float64 + v2, changed = fastpathTV.DecMapFloat64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, false, d) + case *map[float64]bool: + var v2 map[float64]bool + v2, changed = fastpathTV.DecMapFloat64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, false, d) + case *map[uint]interface{}: + var v2 map[uint]interface{} + v2, changed = fastpathTV.DecMapUintIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]string: + fastpathTV.DecMapUintStringV(v, false, d) + case *map[uint]string: + var v2 map[uint]string + v2, changed = fastpathTV.DecMapUintStringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, false, d) + case *map[uint]uint: + var v2 map[uint]uint + v2, changed = fastpathTV.DecMapUintUintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, false, d) + case *map[uint]uint8: + var v2 map[uint]uint8 + v2, changed = fastpathTV.DecMapUintUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, false, d) + case *map[uint]uint16: + var v2 map[uint]uint16 + v2, changed = fastpathTV.DecMapUintUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint32: + 
fastpathTV.DecMapUintUint32V(v, false, d) + case *map[uint]uint32: + var v2 map[uint]uint32 + v2, changed = fastpathTV.DecMapUintUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, false, d) + case *map[uint]uint64: + var v2 map[uint]uint64 + v2, changed = fastpathTV.DecMapUintUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, false, d) + case *map[uint]uintptr: + var v2 map[uint]uintptr + v2, changed = fastpathTV.DecMapUintUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int: + fastpathTV.DecMapUintIntV(v, false, d) + case *map[uint]int: + var v2 map[uint]int + v2, changed = fastpathTV.DecMapUintIntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, false, d) + case *map[uint]int8: + var v2 map[uint]int8 + v2, changed = fastpathTV.DecMapUintInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, false, d) + case *map[uint]int16: + var v2 map[uint]int16 + v2, changed = fastpathTV.DecMapUintInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, false, d) + case *map[uint]int32: + var v2 map[uint]int32 + v2, changed = fastpathTV.DecMapUintInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, false, d) + case *map[uint]int64: + var v2 map[uint]int64 + v2, changed = fastpathTV.DecMapUintInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, false, d) + case *map[uint]float32: + var v2 map[uint]float32 + v2, changed = fastpathTV.DecMapUintFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, false, d) + case *map[uint]float64: + var v2 map[uint]float64 + v2, changed = fastpathTV.DecMapUintFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, false, d) + case *map[uint]bool: + var v2 map[uint]bool + v2, changed = fastpathTV.DecMapUintBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, false, d) + case *map[uint8]interface{}: + var v2 map[uint8]interface{} + v2, changed = fastpathTV.DecMapUint8IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, false, d) + case *map[uint8]string: + var v2 map[uint8]string + v2, changed = fastpathTV.DecMapUint8StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, false, d) + case *map[uint8]uint: + var v2 map[uint8]uint + v2, changed = fastpathTV.DecMapUint8UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, false, d) + case *map[uint8]uint8: + var v2 map[uint8]uint8 + v2, changed = fastpathTV.DecMapUint8Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, false, d) + case *map[uint8]uint16: + var v2 map[uint8]uint16 + v2, changed = fastpathTV.DecMapUint8Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, false, d) + case *map[uint8]uint32: + var v2 map[uint8]uint32 + v2, changed = fastpathTV.DecMapUint8Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, false, d) + case *map[uint8]uint64: + var v2 
map[uint8]uint64 + v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, false, d) + case *map[uint8]uintptr: + var v2 map[uint8]uintptr + v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, false, d) + case *map[uint8]int: + var v2 map[uint8]int + v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, false, d) + case *map[uint8]int8: + var v2 map[uint8]int8 + v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, false, d) + case *map[uint8]int16: + var v2 map[uint8]int16 + v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, false, d) + case *map[uint8]int32: + var v2 map[uint8]int32 + v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, false, d) + case *map[uint8]int64: + var v2 map[uint8]int64 + v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, false, d) + case *map[uint8]float32: + var v2 map[uint8]float32 + v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, false, d) + case *map[uint8]float64: + var v2 map[uint8]float64 + v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, false, d) + case *map[uint8]bool: + var v2 map[uint8]bool + v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, false, d) + case *map[uint16]interface{}: + var v2 map[uint16]interface{} + v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, false, d) + case *map[uint16]string: + var v2 map[uint16]string + v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, false, d) + case *map[uint16]uint: + var v2 map[uint16]uint + v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, false, d) + case *map[uint16]uint8: + var v2 map[uint16]uint8 + v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, false, d) + case *map[uint16]uint16: + var v2 map[uint16]uint16 + v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, false, d) + case *map[uint16]uint32: + var v2 map[uint16]uint32 + v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, false, d) + case *map[uint16]uint64: + var v2 map[uint16]uint64 + v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, false, d) + case 
*map[uint16]uintptr: + var v2 map[uint16]uintptr + v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, false, d) + case *map[uint16]int: + var v2 map[uint16]int + v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, false, d) + case *map[uint16]int8: + var v2 map[uint16]int8 + v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, false, d) + case *map[uint16]int16: + var v2 map[uint16]int16 + v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, false, d) + case *map[uint16]int32: + var v2 map[uint16]int32 + v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, false, d) + case *map[uint16]int64: + var v2 map[uint16]int64 + v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, false, d) + case *map[uint16]float32: + var v2 map[uint16]float32 + v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, false, d) + case *map[uint16]float64: + var v2 map[uint16]float64 + v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, false, d) + case *map[uint16]bool: + var v2 map[uint16]bool + v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, false, d) + case *map[uint32]interface{}: + var v2 map[uint32]interface{} + v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, false, d) + case *map[uint32]string: + var v2 map[uint32]string + v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, false, d) + case *map[uint32]uint: + var v2 map[uint32]uint + v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, false, d) + case *map[uint32]uint8: + var v2 map[uint32]uint8 + v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, false, d) + case *map[uint32]uint16: + var v2 map[uint32]uint16 + v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, false, d) + case *map[uint32]uint32: + var v2 map[uint32]uint32 + v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, false, d) + case *map[uint32]uint64: + var v2 map[uint32]uint64 + v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, false, d) + case *map[uint32]uintptr: + var v2 map[uint32]uintptr + v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case 
map[uint32]int: + fastpathTV.DecMapUint32IntV(v, false, d) + case *map[uint32]int: + var v2 map[uint32]int + v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, false, d) + case *map[uint32]int8: + var v2 map[uint32]int8 + v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, false, d) + case *map[uint32]int16: + var v2 map[uint32]int16 + v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, false, d) + case *map[uint32]int32: + var v2 map[uint32]int32 + v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, false, d) + case *map[uint32]int64: + var v2 map[uint32]int64 + v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, false, d) + case *map[uint32]float32: + var v2 map[uint32]float32 + v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, false, d) + case *map[uint32]float64: + var v2 map[uint32]float64 + v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, false, d) + case *map[uint32]bool: + var v2 map[uint32]bool + v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, false, d) + case *map[uint64]interface{}: + var v2 map[uint64]interface{} + v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, false, d) + case *map[uint64]string: + var v2 map[uint64]string + v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, false, d) + case *map[uint64]uint: + var v2 map[uint64]uint + v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, false, d) + case *map[uint64]uint8: + var v2 map[uint64]uint8 + v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, false, d) + case *map[uint64]uint16: + var v2 map[uint64]uint16 + v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, false, d) + case *map[uint64]uint32: + var v2 map[uint64]uint32 + v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, false, d) + case *map[uint64]uint64: + var v2 map[uint64]uint64 + v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, false, d) + case *map[uint64]uintptr: + var v2 map[uint64]uintptr + v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, false, d) + case *map[uint64]int: + var v2 map[uint64]int + v2, changed = 
fastpathTV.DecMapUint64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, false, d) + case *map[uint64]int8: + var v2 map[uint64]int8 + v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, false, d) + case *map[uint64]int16: + var v2 map[uint64]int16 + v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, false, d) + case *map[uint64]int32: + var v2 map[uint64]int32 + v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, false, d) + case *map[uint64]int64: + var v2 map[uint64]int64 + v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, false, d) + case *map[uint64]float32: + var v2 map[uint64]float32 + v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, false, d) + case *map[uint64]float64: + var v2 map[uint64]float64 + v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, false, d) + case *map[uint64]bool: + var v2 map[uint64]bool + v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, false, d) + case *map[uintptr]interface{}: + var v2 map[uintptr]interface{} + v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, false, d) + case *map[uintptr]string: + var v2 map[uintptr]string + v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, false, d) + case *map[uintptr]uint: + var v2 map[uintptr]uint + v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, false, d) + case *map[uintptr]uint8: + var v2 map[uintptr]uint8 + v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, false, d) + case *map[uintptr]uint16: + var v2 map[uintptr]uint16 + v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, false, d) + case *map[uintptr]uint32: + var v2 map[uintptr]uint32 + v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, false, d) + case *map[uintptr]uint64: + var v2 map[uintptr]uint64 + v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, false, d) + case *map[uintptr]uintptr: + var v2 map[uintptr]uintptr + v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, false, d) + case *map[uintptr]int: + var v2 map[uintptr]int + v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int8: + 
fastpathTV.DecMapUintptrInt8V(v, false, d) + case *map[uintptr]int8: + var v2 map[uintptr]int8 + v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, false, d) + case *map[uintptr]int16: + var v2 map[uintptr]int16 + v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, false, d) + case *map[uintptr]int32: + var v2 map[uintptr]int32 + v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, false, d) + case *map[uintptr]int64: + var v2 map[uintptr]int64 + v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, false, d) + case *map[uintptr]float32: + var v2 map[uintptr]float32 + v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, false, d) + case *map[uintptr]float64: + var v2 map[uintptr]float64 + v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, false, d) + case *map[uintptr]bool: + var v2 map[uintptr]bool + v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, false, d) + case *map[int]interface{}: + var v2 map[int]interface{} + v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int]string: + fastpathTV.DecMapIntStringV(v, false, d) + case *map[int]string: + var v2 map[int]string + v2, changed = fastpathTV.DecMapIntStringV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint: + fastpathTV.DecMapIntUintV(v, false, d) + case *map[int]uint: + var v2 map[int]uint + v2, changed = fastpathTV.DecMapIntUintV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, false, d) + case *map[int]uint8: + var v2 map[int]uint8 + v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, false, d) + case *map[int]uint16: + var v2 map[int]uint16 + v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, false, d) + case *map[int]uint32: + var v2 map[int]uint32 + v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, false, d) + case *map[int]uint64: + var v2 map[int]uint64 + v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, false, d) + case *map[int]uintptr: + var v2 map[int]uintptr + v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int: + fastpathTV.DecMapIntIntV(v, false, d) + case *map[int]int: + var v2 map[int]int + v2, changed = fastpathTV.DecMapIntIntV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, false, d) + case *map[int]int8: + var v2 map[int]int8 + v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, false, d) + case *map[int]int16: + 
var v2 map[int]int16 + v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, false, d) + case *map[int]int32: + var v2 map[int]int32 + v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, false, d) + case *map[int]int64: + var v2 map[int]int64 + v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, false, d) + case *map[int]float32: + var v2 map[int]float32 + v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, false, d) + case *map[int]float64: + var v2 map[int]float64 + v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, false, d) + case *map[int]bool: + var v2 map[int]bool + v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, false, d) + case *map[int8]interface{}: + var v2 map[int8]interface{} + v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, false, d) + case *map[int8]string: + var v2 map[int8]string + v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, false, d) + case *map[int8]uint: + var v2 map[int8]uint + v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, false, d) + case *map[int8]uint8: + var v2 map[int8]uint8 + v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, false, d) + case *map[int8]uint16: + var v2 map[int8]uint16 + v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, false, d) + case *map[int8]uint32: + var v2 map[int8]uint32 + v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, false, d) + case *map[int8]uint64: + var v2 map[int8]uint64 + v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, false, d) + case *map[int8]uintptr: + var v2 map[int8]uintptr + v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, false, d) + case *map[int8]int: + var v2 map[int8]int + v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, false, d) + case *map[int8]int8: + var v2 map[int8]int8 + v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, false, d) + case *map[int8]int16: + var v2 map[int8]int16 + v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, false, d) + case *map[int8]int32: + var v2 map[int8]int32 + v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int64: + 
fastpathTV.DecMapInt8Int64V(v, false, d) + case *map[int8]int64: + var v2 map[int8]int64 + v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, false, d) + case *map[int8]float32: + var v2 map[int8]float32 + v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, false, d) + case *map[int8]float64: + var v2 map[int8]float64 + v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, false, d) + case *map[int8]bool: + var v2 map[int8]bool + v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, false, d) + case *map[int16]interface{}: + var v2 map[int16]interface{} + v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, false, d) + case *map[int16]string: + var v2 map[int16]string + v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, false, d) + case *map[int16]uint: + var v2 map[int16]uint + v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, false, d) + case *map[int16]uint8: + var v2 map[int16]uint8 + v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, false, d) + case *map[int16]uint16: + var v2 map[int16]uint16 + v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, false, d) + case *map[int16]uint32: + var v2 map[int16]uint32 + v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, false, d) + case *map[int16]uint64: + var v2 map[int16]uint64 + v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, false, d) + case *map[int16]uintptr: + var v2 map[int16]uintptr + v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, false, d) + case *map[int16]int: + var v2 map[int16]int + v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, false, d) + case *map[int16]int8: + var v2 map[int16]int8 + v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, false, d) + case *map[int16]int16: + var v2 map[int16]int16 + v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, false, d) + case *map[int16]int32: + var v2 map[int16]int32 + v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, false, d) + case *map[int16]int64: + var v2 map[int16]int64 + v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, false, d) + 
case *map[int16]float32: + var v2 map[int16]float32 + v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, false, d) + case *map[int16]float64: + var v2 map[int16]float64 + v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, false, d) + case *map[int16]bool: + var v2 map[int16]bool + v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, false, d) + case *map[int32]interface{}: + var v2 map[int32]interface{} + v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, false, d) + case *map[int32]string: + var v2 map[int32]string + v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, false, d) + case *map[int32]uint: + var v2 map[int32]uint + v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, false, d) + case *map[int32]uint8: + var v2 map[int32]uint8 + v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, false, d) + case *map[int32]uint16: + var v2 map[int32]uint16 + v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, false, d) + case *map[int32]uint32: + var v2 map[int32]uint32 + v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, false, d) + case *map[int32]uint64: + var v2 map[int32]uint64 + v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, false, d) + case *map[int32]uintptr: + var v2 map[int32]uintptr + v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, false, d) + case *map[int32]int: + var v2 map[int32]int + v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, false, d) + case *map[int32]int8: + var v2 map[int32]int8 + v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, false, d) + case *map[int32]int16: + var v2 map[int32]int16 + v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, false, d) + case *map[int32]int32: + var v2 map[int32]int32 + v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, false, d) + case *map[int32]int64: + var v2 map[int32]int64 + v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, false, d) + case *map[int32]float32: + var v2 map[int32]float32 + v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, false, d) + case 
*map[int32]float64: + var v2 map[int32]float64 + v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, false, d) + case *map[int32]bool: + var v2 map[int32]bool + v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, false, d) + case *map[int64]interface{}: + var v2 map[int64]interface{} + v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, false, d) + case *map[int64]string: + var v2 map[int64]string + v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, false, d) + case *map[int64]uint: + var v2 map[int64]uint + v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, false, d) + case *map[int64]uint8: + var v2 map[int64]uint8 + v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, false, d) + case *map[int64]uint16: + var v2 map[int64]uint16 + v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, false, d) + case *map[int64]uint32: + var v2 map[int64]uint32 + v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, false, d) + case *map[int64]uint64: + var v2 map[int64]uint64 + v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, false, d) + case *map[int64]uintptr: + var v2 map[int64]uintptr + v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, false, d) + case *map[int64]int: + var v2 map[int64]int + v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, false, d) + case *map[int64]int8: + var v2 map[int64]int8 + v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, false, d) + case *map[int64]int16: + var v2 map[int64]int16 + v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, false, d) + case *map[int64]int32: + var v2 map[int64]int32 + v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, false, d) + case *map[int64]int64: + var v2 map[int64]int64 + v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, false, d) + case *map[int64]float32: + var v2 map[int64]float32 + v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, false, d) + case *map[int64]float64: + var v2 map[int64]float64 + v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, false, d) + case *map[int64]bool: + var 
v2 map[int64]bool + v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, false, d) + case *map[bool]interface{}: + var v2 map[bool]interface{} + v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, false, d) + case *map[bool]string: + var v2 map[bool]string + v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, false, d) + case *map[bool]uint: + var v2 map[bool]uint + v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, false, d) + case *map[bool]uint8: + var v2 map[bool]uint8 + v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, false, d) + case *map[bool]uint16: + var v2 map[bool]uint16 + v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, false, d) + case *map[bool]uint32: + var v2 map[bool]uint32 + v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, false, d) + case *map[bool]uint64: + var v2 map[bool]uint64 + v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, false, d) + case *map[bool]uintptr: + var v2 map[bool]uintptr + v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, false, d) + case *map[bool]int: + var v2 map[bool]int + v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, false, d) + case *map[bool]int8: + var v2 map[bool]int8 + v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, false, d) + case *map[bool]int16: + var v2 map[bool]int16 + v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, false, d) + case *map[bool]int32: + var v2 map[bool]int32 + v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, false, d) + case *map[bool]int64: + var v2 map[bool]int64 + v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, false, d) + case *map[bool]float32: + var v2 map[bool]float32 + v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, false, d) + case *map[bool]float64: + var v2 map[bool]float64 + v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, false, d) + case *map[bool]bool: + var v2 map[bool]bool + v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d) + if changed { + *v = v2 + } + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := 
iv.(type) { + + case *[]interface{}: + *v = nil + case *[]string: + *v = nil + case *[]float32: + *v = nil + case *[]float64: + *v = nil + case *[]uint: + *v = nil + case *[]uint8: + *v = nil + case *[]uint16: + *v = nil + case *[]uint32: + *v = nil + case *[]uint64: + *v = nil + case *[]uintptr: + *v = nil + case *[]int: + *v = nil + case *[]int8: + *v = nil + case *[]int16: + *v = nil + case *[]int32: + *v = nil + case *[]int64: + *v = nil + case *[]bool: + *v = nil + + case *map[interface{}]interface{}: + *v = nil + case *map[interface{}]string: + *v = nil + case *map[interface{}]uint: + *v = nil + case *map[interface{}]uint8: + *v = nil + case *map[interface{}]uint16: + *v = nil + case *map[interface{}]uint32: + *v = nil + case *map[interface{}]uint64: + *v = nil + case *map[interface{}]uintptr: + *v = nil + case *map[interface{}]int: + *v = nil + case *map[interface{}]int8: + *v = nil + case *map[interface{}]int16: + *v = nil + case *map[interface{}]int32: + *v = nil + case *map[interface{}]int64: + *v = nil + case *map[interface{}]float32: + *v = nil + case *map[interface{}]float64: + *v = nil + case *map[interface{}]bool: + *v = nil + case *map[string]interface{}: + *v = nil + case *map[string]string: + *v = nil + case *map[string]uint: + *v = nil + case *map[string]uint8: + *v = nil + case *map[string]uint16: + *v = nil + case *map[string]uint32: + *v = nil + case *map[string]uint64: + *v = nil + case *map[string]uintptr: + *v = nil + case *map[string]int: + *v = nil + case *map[string]int8: + *v = nil + case *map[string]int16: + *v = nil + case *map[string]int32: + *v = nil + case *map[string]int64: + *v = nil + case *map[string]float32: + *v = nil + case *map[string]float64: + *v = nil + case *map[string]bool: + *v = nil + case *map[float32]interface{}: + *v = nil + case *map[float32]string: + *v = nil + case *map[float32]uint: + *v = nil + case *map[float32]uint8: + *v = nil + case *map[float32]uint16: + *v = nil + case *map[float32]uint32: + *v = nil + case *map[float32]uint64: + *v = nil + case *map[float32]uintptr: + *v = nil + case *map[float32]int: + *v = nil + case *map[float32]int8: + *v = nil + case *map[float32]int16: + *v = nil + case *map[float32]int32: + *v = nil + case *map[float32]int64: + *v = nil + case *map[float32]float32: + *v = nil + case *map[float32]float64: + *v = nil + case *map[float32]bool: + *v = nil + case *map[float64]interface{}: + *v = nil + case *map[float64]string: + *v = nil + case *map[float64]uint: + *v = nil + case *map[float64]uint8: + *v = nil + case *map[float64]uint16: + *v = nil + case *map[float64]uint32: + *v = nil + case *map[float64]uint64: + *v = nil + case *map[float64]uintptr: + *v = nil + case *map[float64]int: + *v = nil + case *map[float64]int8: + *v = nil + case *map[float64]int16: + *v = nil + case *map[float64]int32: + *v = nil + case *map[float64]int64: + *v = nil + case *map[float64]float32: + *v = nil + case *map[float64]float64: + *v = nil + case *map[float64]bool: + *v = nil + case *map[uint]interface{}: + *v = nil + case *map[uint]string: + *v = nil + case *map[uint]uint: + *v = nil + case *map[uint]uint8: + *v = nil + case *map[uint]uint16: + *v = nil + case *map[uint]uint32: + *v = nil + case *map[uint]uint64: + *v = nil + case *map[uint]uintptr: + *v = nil + case *map[uint]int: + *v = nil + case *map[uint]int8: + *v = nil + case *map[uint]int16: + *v = nil + case *map[uint]int32: + *v = nil + case *map[uint]int64: + *v = nil + case *map[uint]float32: + *v = nil + case *map[uint]float64: + *v = nil + case 
*map[uint]bool: + *v = nil + case *map[uint8]interface{}: + *v = nil + case *map[uint8]string: + *v = nil + case *map[uint8]uint: + *v = nil + case *map[uint8]uint8: + *v = nil + case *map[uint8]uint16: + *v = nil + case *map[uint8]uint32: + *v = nil + case *map[uint8]uint64: + *v = nil + case *map[uint8]uintptr: + *v = nil + case *map[uint8]int: + *v = nil + case *map[uint8]int8: + *v = nil + case *map[uint8]int16: + *v = nil + case *map[uint8]int32: + *v = nil + case *map[uint8]int64: + *v = nil + case *map[uint8]float32: + *v = nil + case *map[uint8]float64: + *v = nil + case *map[uint8]bool: + *v = nil + case *map[uint16]interface{}: + *v = nil + case *map[uint16]string: + *v = nil + case *map[uint16]uint: + *v = nil + case *map[uint16]uint8: + *v = nil + case *map[uint16]uint16: + *v = nil + case *map[uint16]uint32: + *v = nil + case *map[uint16]uint64: + *v = nil + case *map[uint16]uintptr: + *v = nil + case *map[uint16]int: + *v = nil + case *map[uint16]int8: + *v = nil + case *map[uint16]int16: + *v = nil + case *map[uint16]int32: + *v = nil + case *map[uint16]int64: + *v = nil + case *map[uint16]float32: + *v = nil + case *map[uint16]float64: + *v = nil + case *map[uint16]bool: + *v = nil + case *map[uint32]interface{}: + *v = nil + case *map[uint32]string: + *v = nil + case *map[uint32]uint: + *v = nil + case *map[uint32]uint8: + *v = nil + case *map[uint32]uint16: + *v = nil + case *map[uint32]uint32: + *v = nil + case *map[uint32]uint64: + *v = nil + case *map[uint32]uintptr: + *v = nil + case *map[uint32]int: + *v = nil + case *map[uint32]int8: + *v = nil + case *map[uint32]int16: + *v = nil + case *map[uint32]int32: + *v = nil + case *map[uint32]int64: + *v = nil + case *map[uint32]float32: + *v = nil + case *map[uint32]float64: + *v = nil + case *map[uint32]bool: + *v = nil + case *map[uint64]interface{}: + *v = nil + case *map[uint64]string: + *v = nil + case *map[uint64]uint: + *v = nil + case *map[uint64]uint8: + *v = nil + case *map[uint64]uint16: + *v = nil + case *map[uint64]uint32: + *v = nil + case *map[uint64]uint64: + *v = nil + case *map[uint64]uintptr: + *v = nil + case *map[uint64]int: + *v = nil + case *map[uint64]int8: + *v = nil + case *map[uint64]int16: + *v = nil + case *map[uint64]int32: + *v = nil + case *map[uint64]int64: + *v = nil + case *map[uint64]float32: + *v = nil + case *map[uint64]float64: + *v = nil + case *map[uint64]bool: + *v = nil + case *map[uintptr]interface{}: + *v = nil + case *map[uintptr]string: + *v = nil + case *map[uintptr]uint: + *v = nil + case *map[uintptr]uint8: + *v = nil + case *map[uintptr]uint16: + *v = nil + case *map[uintptr]uint32: + *v = nil + case *map[uintptr]uint64: + *v = nil + case *map[uintptr]uintptr: + *v = nil + case *map[uintptr]int: + *v = nil + case *map[uintptr]int8: + *v = nil + case *map[uintptr]int16: + *v = nil + case *map[uintptr]int32: + *v = nil + case *map[uintptr]int64: + *v = nil + case *map[uintptr]float32: + *v = nil + case *map[uintptr]float64: + *v = nil + case *map[uintptr]bool: + *v = nil + case *map[int]interface{}: + *v = nil + case *map[int]string: + *v = nil + case *map[int]uint: + *v = nil + case *map[int]uint8: + *v = nil + case *map[int]uint16: + *v = nil + case *map[int]uint32: + *v = nil + case *map[int]uint64: + *v = nil + case *map[int]uintptr: + *v = nil + case *map[int]int: + *v = nil + case *map[int]int8: + *v = nil + case *map[int]int16: + *v = nil + case *map[int]int32: + *v = nil + case *map[int]int64: + *v = nil + case *map[int]float32: + *v = nil + case *map[int]float64: + 
*v = nil + case *map[int]bool: + *v = nil + case *map[int8]interface{}: + *v = nil + case *map[int8]string: + *v = nil + case *map[int8]uint: + *v = nil + case *map[int8]uint8: + *v = nil + case *map[int8]uint16: + *v = nil + case *map[int8]uint32: + *v = nil + case *map[int8]uint64: + *v = nil + case *map[int8]uintptr: + *v = nil + case *map[int8]int: + *v = nil + case *map[int8]int8: + *v = nil + case *map[int8]int16: + *v = nil + case *map[int8]int32: + *v = nil + case *map[int8]int64: + *v = nil + case *map[int8]float32: + *v = nil + case *map[int8]float64: + *v = nil + case *map[int8]bool: + *v = nil + case *map[int16]interface{}: + *v = nil + case *map[int16]string: + *v = nil + case *map[int16]uint: + *v = nil + case *map[int16]uint8: + *v = nil + case *map[int16]uint16: + *v = nil + case *map[int16]uint32: + *v = nil + case *map[int16]uint64: + *v = nil + case *map[int16]uintptr: + *v = nil + case *map[int16]int: + *v = nil + case *map[int16]int8: + *v = nil + case *map[int16]int16: + *v = nil + case *map[int16]int32: + *v = nil + case *map[int16]int64: + *v = nil + case *map[int16]float32: + *v = nil + case *map[int16]float64: + *v = nil + case *map[int16]bool: + *v = nil + case *map[int32]interface{}: + *v = nil + case *map[int32]string: + *v = nil + case *map[int32]uint: + *v = nil + case *map[int32]uint8: + *v = nil + case *map[int32]uint16: + *v = nil + case *map[int32]uint32: + *v = nil + case *map[int32]uint64: + *v = nil + case *map[int32]uintptr: + *v = nil + case *map[int32]int: + *v = nil + case *map[int32]int8: + *v = nil + case *map[int32]int16: + *v = nil + case *map[int32]int32: + *v = nil + case *map[int32]int64: + *v = nil + case *map[int32]float32: + *v = nil + case *map[int32]float64: + *v = nil + case *map[int32]bool: + *v = nil + case *map[int64]interface{}: + *v = nil + case *map[int64]string: + *v = nil + case *map[int64]uint: + *v = nil + case *map[int64]uint8: + *v = nil + case *map[int64]uint16: + *v = nil + case *map[int64]uint32: + *v = nil + case *map[int64]uint64: + *v = nil + case *map[int64]uintptr: + *v = nil + case *map[int64]int: + *v = nil + case *map[int64]int8: + *v = nil + case *map[int64]int16: + *v = nil + case *map[int64]int32: + *v = nil + case *map[int64]int64: + *v = nil + case *map[int64]float32: + *v = nil + case *map[int64]float64: + *v = nil + case *map[int64]bool: + *v = nil + case *map[bool]interface{}: + *v = nil + case *map[bool]string: + *v = nil + case *map[bool]uint: + *v = nil + case *map[bool]uint8: + *v = nil + case *map[bool]uint16: + *v = nil + case *map[bool]uint32: + *v = nil + case *map[bool]uint64: + *v = nil + case *map[bool]uintptr: + *v = nil + case *map[bool]int: + *v = nil + case *map[bool]int8: + *v = nil + case *map[bool]int16: + *v = nil + case *map[bool]int32: + *v = nil + case *map[bool]int64: + *v = nil + case *map[bool]float32: + *v = nil + case *map[bool]float64: + *v = nil + case *map[bool]bool: + *v = nil + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions + +func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]interface{}) + v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]interface{}) + v2, changed := fastpathTV.DecSliceIntfV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == 
&v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + v, changed := f.DecSliceIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]interface{}, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]interface{}, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = nil + } else { + d.decode(&v[uint(j)]) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]string) + v, changed := fastpathTV.DecSliceStringV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]string) + v2, changed := fastpathTV.DecSliceStringV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + v, changed := f.DecSliceStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]string, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]string, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = 
append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = "" + } else { + v[uint(j)] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]float32) + v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float32) + v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + v, changed := f.DecSliceFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]float32, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]float32, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = float32(chkOvf.Float32V(dd.DecodeFloat64())) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float64) + v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if 
containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]float64, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]float64, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = dd.DecodeFloat64() + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint) + v2, changed := fastpathTV.DecSliceUintV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uint, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 
&& v == nil { + v = make([]uint, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint8) + v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint8) + v2, changed := fastpathTV.DecSliceUint8V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) { + v, changed := f.DecSliceUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uint8, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]uint8, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]uint8, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint16) + v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint16) + v2, changed := fastpathTV.DecSliceUint16V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { + v, changed := f.DecSliceUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = 
make([]uint16, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]uint16, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]uint16, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint32) + v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint32) + v2, changed := fastpathTV.DecSliceUint32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { + v, changed := f.DecSliceUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uint32, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]uint32, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]uint32, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint64) + v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d) + if 
changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint64) + v2, changed := fastpathTV.DecSliceUint64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { + v, changed := f.DecSliceUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uint64, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint64, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = dd.DecodeUint64() + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]uint64, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uintptr) + v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uintptr) + v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { + v, changed := f.DecSliceUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uintptr, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } 
[… remainder of the generated fast-path hunk, with the original diff line breaks lost in extraction: DecSliceUintptrV, DecSliceIntV, DecSliceInt8V, DecSliceInt16V, DecSliceInt32V, DecSliceInt64V and DecSliceBoolV, followed by the DecMapIntf*, DecMapString* and DecMapFloat32* decoders (with their fastpathDec*R and Dec*X wrappers), each repeating the same generated slice/map decode pattern for one concrete key/value type pair …]
{ + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp 
*map[float32]float32, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var 
mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd, esep := 
d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + v, 
changed := f.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d) 
+ if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + 
return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]string) + v, changed := fastpathTV.DecMapUintStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) + } +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { + v, changed := f.DecMapUintStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint) + v, changed := fastpathTV.DecMapUintUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) + } +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { + v, changed := f.DecMapUintUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + 
d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint8) + v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) + } +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { + v, changed := f.DecMapUintUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint16) + v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) + } +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { + v, changed := f.DecMapUintUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint32) + v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) + } +} +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { + v, changed := f.DecMapUintUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, + d *Decoder) (_ map[uint]uint32, changed 
bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint64) + v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) + } +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { + v, changed := f.DecMapUintUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uintptr) + v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) + } +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { + v, changed := f.DecMapUintUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { 
+ } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int) + v, changed := fastpathTV.DecMapUintIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) + } +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { + v, changed := f.DecMapUintIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int8) + v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) + } +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { + v, changed := f.DecMapUintInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int16) + v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) + } +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { + v, changed := f.DecMapUintInt16V(*vp, 
true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int32) + v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) + } +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { + v, changed := f.DecMapUintInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int64) + v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) + } +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { + v, changed := f.DecMapUintInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = 
uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float32) + v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) + } +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { + v, changed := f.DecMapUintFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float64) + v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) + } +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { + v, changed := f.DecMapUintFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]bool) + v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + 
fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) + } +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { + v, changed := f.DecMapUintBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]interface{}) + v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { + v, changed := f.DecMapUint8IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]string) + v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) + } +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { + v, changed := f.DecMapUint8StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = 
make(map[uint8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint) + v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) + } +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { + v, changed := f.DecMapUint8UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint8) + v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) + } +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { + v, changed := f.DecMapUint8Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return 
v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint16) + v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) + } +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { + v, changed := f.DecMapUint8Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint32) + v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) + } +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { + v, changed := f.DecMapUint8Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint64) + v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) + } +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { + v, changed := f.DecMapUint8Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, + d *Decoder) (_ map[uint8]uint64, 
changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uintptr) + v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { + v, changed := f.DecMapUint8UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int) + v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) + } +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { + v, changed := f.DecMapUint8IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int8) + v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) + } +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { + v, changed := f.DecMapUint8Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int16) + v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) + } +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { + v, changed := f.DecMapUint8Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int32) + v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) + } +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { + v, changed := 
f.DecMapUint8Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int64) + v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) + } +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { + v, changed := f.DecMapUint8Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float32) + v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) + } +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { + v, changed := f.DecMapUint8Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep 
{ + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float64) + v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) + } +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { + v, changed := f.DecMapUint8Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]bool) + v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) + } +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { + v, changed := f.DecMapUint8BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]interface{}) + v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + 
fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { + v, changed := f.DecMapUint16IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]string) + v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) + } +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { + v, changed := f.DecMapUint16StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint) + v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) + } +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { + v, changed := f.DecMapUint16UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 10) + v = make(map[uint16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint8) + v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) + } +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { + v, changed := f.DecMapUint16Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint16) + v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) + } +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) { + v, changed := f.DecMapUint16Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = 
uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint32) + v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) + } +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { + v, changed := f.DecMapUint16Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint64) + v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) + } +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { + v, changed := f.DecMapUint16Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uintptr) + v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { + v, changed := f.DecMapUint16UintptrV(*vp, true, 
d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int) + v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) + } +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { + v, changed := f.DecMapUint16IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int8) + v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) + } +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { + v, changed := f.DecMapUint16Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d) + if 
changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float32) + v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { + v, changed := f.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = 
make(map[uint16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv 
= v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]string) + v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) + } +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = 
v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if 
esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, + d *Decoder) (_ map[uint32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int) + v, changed := 
fastpathTV.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { + v, changed := f.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) 
+ changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int32) + v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + 
return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, + d 
*Decoder) (_ map[uint32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), 
false, d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + 
var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int8) + v, 
changed := fastpathTV.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } 
+ if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]string) + v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) + } +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { + v, changed := f.DecMapUintptrStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, + d *Decoder) (_ map[uintptr]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint) + v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) + } +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { + v, changed := f.DecMapUintptrUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + 
if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint8) + v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) + } +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { + v, changed := f.DecMapUintptrUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint16) + v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) + } +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { + v, changed := f.DecMapUintptrUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint32) + v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d) + 
if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d) + } +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) { + v, changed := f.DecMapUintptrUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint64) + v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d) + } +} +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) { + v, changed := f.DecMapUintptrUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uintptr) + v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d) + } +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) { + v, changed := f.DecMapUintptrUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int) + v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d) + } +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) { + v, changed := f.DecMapUintptrIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int8) + v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d) + } +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) { + v, changed := f.DecMapUintptrInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] 
= 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int16) + v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d) + } +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) { + v, changed := f.DecMapUintptrInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int32) + v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d) + } +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) { + v, changed := f.DecMapUintptrInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int64) + v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d) + } +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d 
*Decoder) { + v, changed := f.DecMapUintptrInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float32) + v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d) + } +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) { + v, changed := f.DecMapUintptrFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float64) + v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d) + } +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) { + v, changed := f.DecMapUintptrFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var 
mk uintptr + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]bool) + v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d) + } +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) { + v, changed := f.DecMapUintptrBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uintptr + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]interface{}) + v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d) + } +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { + v, changed := f.DecMapIntIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed 
+} + +func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]string) + v, changed := fastpathTV.DecMapIntStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d) + } +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { + v, changed := f.DecMapIntStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint) + v, changed := fastpathTV.DecMapIntUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d) + } +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) { + v, changed := f.DecMapIntUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint8) + v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d) + } +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { + v, changed := f.DecMapIntUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + 
xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint16) + v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d) + } +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) { + v, changed := f.DecMapIntUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint32) + v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d) + } +} +func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) { + v, changed := f.DecMapIntUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if 
v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint64) + v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d) + } +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { + v, changed := f.DecMapIntUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uintptr) + v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d) + } +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) { + v, changed := f.DecMapIntUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int) + v, changed := fastpathTV.DecMapIntIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d) + } +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { + v, changed := f.DecMapIntIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd, 
esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int8) + v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d) + } +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) { + v, changed := f.DecMapIntInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int16) + v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d) + } +} +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) { + v, changed := f.DecMapIntInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + 
continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int32) + v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d) + } +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { + v, changed := f.DecMapIntInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int64) + v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d) + } +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) { + v, changed := f.DecMapIntInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float32) + v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d) + } +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) { + v, changed := f.DecMapIntFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, 
+ d *Decoder) (_ map[int]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float64) + v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d) + } +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { + v, changed := f.DecMapIntFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, + d *Decoder) (_ map[int]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]bool) + v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d) + } +} +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) { + v, changed := f.DecMapIntBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]interface{}) + v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) { + v, changed := f.DecMapInt8IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]string) + v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) + } +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { + v, changed := f.DecMapInt8StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint) + v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) + } +} +func (f fastpathT) 
DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { + v, changed := f.DecMapInt8UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint8) + v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) + } +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { + v, changed := f.DecMapInt8Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint16) + v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) + } +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { + v, changed := f.DecMapInt8Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint32) + v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) + } +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { + v, changed := f.DecMapInt8Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint64) + v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) + } +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { + v, changed := f.DecMapInt8Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uintptr) + v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d) + if changed 
{ + *vp = v + } + } else { + fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { + v, changed := f.DecMapInt8UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int) + v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) + } +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { + v, changed := f.DecMapInt8IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int8) + v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) + } +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { + v, changed := f.DecMapInt8Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + 
var mk int8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int16) + v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) + } +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { + v, changed := f.DecMapInt8Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int32) + v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) + } +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { + v, changed := f.DecMapInt8Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[int8]int64) + v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) + } +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) { + v, changed := f.DecMapInt8Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float32) + v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) + } +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { + v, changed := f.DecMapInt8Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float64) + v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) + } +} +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { + v, changed := f.DecMapInt8Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = 
make(map[int8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]bool) + v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) + } +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { + v, changed := f.DecMapInt8BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]interface{}) + v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { + v, changed := f.DecMapInt16IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if 
v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, + d *Decoder) 
(_ map[int16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) + } +} +func (f fastpathT) 
DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d) + if 
changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + if containerLen == 
0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + 
} + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, + d *Decoder) (_ 
map[int32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp 
*map[int32]int8, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, 
true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + 
if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v 
!= nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + v, changed := f.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + 
if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, + d *Decoder) (_ 
map[int64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + 
if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } +} 
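
For orientation, here is a minimal sketch (not part of this diff or of the vendored code) of how the generated map fast-path decoders above are exercised through the package's public API. It assumes the codec package's usual entry points (`MsgpackHandle`, `NewEncoderBytes`, `NewDecoderBytes`); `map[int64]int64` is one of the key/value combinations with a generated fast path, so the round trip below should avoid reflection on the hot path.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	// map[int64]int64 has a generated fast path (DecMapInt64Int64V above),
	// so both the encode and the decode bypass reflection for the map body.
	in := map[int64]int64{1: 10, 2: 20}

	var buf []byte
	h := &codec.MsgpackHandle{}

	if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
		panic(err)
	}

	out := map[int64]int64{}
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}

	fmt.Println(out) // map[1:10 2:20]
}
```

Passing `&out` routes through the fast-path type switch (`case *map[int64]int64`), which is why the decoder can replace a nil map in place when `canChange` is true.
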
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + v, changed := f.DecMapBoolBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl new file mode 100644 index 00000000000..7617c435064 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl @@ -0,0 +1,491 @@ +// +build !notfastpath + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fast-path.go.tmpl - DO NOT EDIT. + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. 
+// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. +// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" + +type fastpathT struct {} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [{{ .FastpathLen }}]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + // Note: we use goto (instead of for loop) so this can be inlined. + // h, i, j := 0, 0, len(x) + var h, i uint + var j = uint(len(x)) +LOOP: + if i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(x)) && x[i].rtid == rtid { + return int(i) + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } +func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + var i uint = 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + } + {{/* do not register []uint8 in fast-path */}} + {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey }} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* +*/}}{{end}}{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* +*/}}{{end}}{{end}}{{end}} + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +{{/* +**** removing this block, as they are never called directly **** + + + +**** removing this block, as they are never called directly **** + + + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false 
}}V(v, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + + + +**** removing this block, as they are never called directly **** + + + +**** removing this block, as they are never called directly **** +*/}} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { ee.WriteArrayElem() } + {{ encmd .Elem "v2"}} + } + ee.WriteArrayEnd() +} +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + {{ encmd .Elem "v2"}} + } + ee.WriteMapEnd() +} +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + {{if eq .MapKey "interface{}"}}{{/* out of band + */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { ee.WriteMapElemKey() } + e.asis(v2[j].v) + if esep { ee.WriteMapElemValue() } + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + var i uint + for k := range v { + v2[i] = {{ $x }}(k) + i++ + } + sort.Sort({{ sorttype .MapKey false}}(v2)) + for _, k2 := range v2 { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} + if esep { ee.WriteMapElemValue() } + {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} + } {{end}} + } else { + for k2, v2 := range v { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ encmd .MapKey "k2"}}{{end}} + if esep { ee.WriteMapElemValue() } + {{ encmd .Elem "v2"}} + } + } + ee.WriteMapEnd() +} +{{end}}{{end}}{{end}} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + var changed bool + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + case []{{ .Elem }}: + var v2 []{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]{{ .Elem }}: + var v2 []{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) + if changed { + *v = v2 + }{{/* +*/}}{{end}}{{end}}{{end}}{{end}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/* +// maps only change if nil, and in that case, there's no point copying +*/}} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) + case *map[{{ .MapKey }}]{{ .Elem }}: + var v2 map[{{ .MapKey }}]{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) + if changed { + *v = v2 + }{{/* +*/}}{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case *[]{{ .Elem }}: + *v = nil {{/* +*/}}{{end}}{{end}}{{end}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case *map[{{ .MapKey }}]{{ .Elem }}: + *v = nil {{/* +*/}}{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. 
contained in an interface{}) +*/}} +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d) + if changed { *vp = v } + } else { + v := rv2i(rv).([]{{ .Elem }}) + v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) + if changed { *vp = v } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { + dd := d.d{{/* + // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() + */}} + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]{{ .Elem }}, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + } else { + xlen = 8 + } + v = make([]{{ .Elem }}, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = {{ zerocmd .Elem }} + } else { + {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem }}{{ end }} + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]{{ .Elem }}, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. 
contained in an interface{}) +*/}} +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); + if changed { *vp = v } + } else { + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) + if changed { *vp = v } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, + d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators(){{/* + // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() + */}} + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) + v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + {{end}}var mk {{ .MapKey }} + var mv {{ .Elem }} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { dd.ReadMapElemKey() } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if esep { dd.ReadMapElemValue() } + if dd.TryDecodeAsNil() { + if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } + continue + } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { v[mk] = mv } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} +{{end}}{{end}}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go new file mode 100644 index 00000000000..cf97db0f27c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. 
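
As a hedged illustration of the `notfastpath` escape hatch described in the comment above (a hypothetical in-package test, not part of the vendored code): when a build runs with `-tags notfastpath`, `fast-path.not.go` is compiled instead of the large generated `fast-path.go`, and the `fastpathEnabled` constant flips to false, which an internal test could assert.

```go
// +build notfastpath

package codec

import "testing"

// Hypothetical sketch: under the notfastpath build tag the stub file is
// compiled in, so fastpathEnabled (defined in both variants of the file)
// must report false and the reflection-only path is in effect.
func TestFastpathDisabled(t *testing.T) {
	if fastpathEnabled {
		t.Fatal("expected fastpathEnabled to be false when built with the notfastpath tag")
	}
}
```

Such a test would run as `go test -tags notfastpath`; with a default build the generated fast paths are compiled in and a file guarded by this tag is simply excluded.
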
+ +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + fn := d.h.fn(uint8SliceTyp, true, true) + d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) + return v, true +} + +var fastpathAV fastpathA +var fastpathTV fastpathT + +// ---- +type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl new file mode 100644 index 00000000000..790e914e13c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl @@ -0,0 +1,78 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if 
isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl new file mode 100644 index 00000000000..8323b54940d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl @@ -0,0 +1,42 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl new file mode 100644 index 00000000000..4249588a3cf --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl @@ -0,0 +1,27 @@ +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range {{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go new file mode 100644 index 00000000000..2a7d1aab70b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go @@ -0,0 +1,343 @@ +// comment this out // + build ignore + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 10 + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { + ge = genHelperEncoder{e: e} + ee = genHelperEncDriver{encDriver: e.e} + return +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl new file mode 100644 index 00000000000..f5d0634e6a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl @@ -0,0 +1,308 @@ +// comment this out // + build ignore + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = {{ .Version }} + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { + ge = genHelperEncoder{e: e} + ee = genHelperEncDriver{encDriver: e.e} + return +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go new file mode 100644 index 00000000000..8b00090a8ff --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go @@ -0,0 +1,164 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + 
} else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + +const genEncChanTmpl = ` +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range {{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} +` diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen.go new file mode 100644 index 00000000000..74c4aa86af7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen.go @@ -0,0 +1,2149 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// - MissingFielder implementation. +// If a type implements MissingFielder, it is completely ignored by codecgen. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. 
+// --------------------------------------------------- +// +// codec framework is very feature rich. +// When encoding or decoding into an interface, it depends on the runtime type of the interface. +// The type of the interface may be a named type, an extension, etc. +// Consequently, we fallback to runtime codec for encoding/decoding interfaces. +// In addition, we fallback for any value which cannot be guaranteed at runtime. +// This allows us support ANY value, including any named types, specifically those which +// do not implement our interfaces (e.g. Selfer). +// +// This explains some slowness compared to other code generation codecs (e.g. msgp). +// This reduction in speed is only seen when your refers to interfaces, +// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } +// +// codecgen will panic if the file was generated with an old version of the library in use. +// +// Note: +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// Note: +// codecgen-generated code depends on the variables defined by fast-path.generated.go. +// consequently, you cannot run with tags "codecgen notfastpath". + +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +// v6: removed unsafe from gen, and now uses codecgen.exec tag +// v7: +// v8: current - we now maintain compatibility with old generated code. +// v9: skipped +// v10: modified encDriver and decDriver interfaces. Remove deprecated methods after Jan 1, 2019 +const genVersion = 10 + +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + errGenAllTypesSamePkg = errors.New("All types must be in the same package") + errGenExpectArrayOrMap = errors.New("unexpected type. 
Expecting array/map/slice") + + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +type genBuf struct { + buf []byte +} + +func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) v() string { return string(x.buf) } +func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } +func (x *genBuf) reset() { + if x.buf != nil { + x.buf = x.buf[:0] + } +} + +// genRunner holds some state used during a Gen run. +type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(errGenAllTypesSamePkg) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// Code generated by codecgen - DO NOT EDIT. + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + if k == x.imn[k] { + x.linef("\"%s\"", k) + } else { + x.linef("%s \"%s\"", x.imn[k], k) + } + } + // add required packages + for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) + x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) + x.linef("// ----- value types used ----") + for _, vt := range [...]valueType{ + valueTypeArray, valueTypeMap, valueTypeString, + valueTypeInt, valueTypeUint, valueTypeFloat} { + x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) + } + + x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) + x.line(")") + x.line("var (") + x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) + // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) + x.linef("}") + x.line("if false { var _ byte = 0; // reference the types, but skip this branch at build/run time") + // x.line("_ = strconv.ParseInt") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + _, err := io.WriteString(x.w, s) + if err != nil { + panic(err) + } +} + +func (x *genRunner) outf(s string, params ...interface{}) { + _, err := fmt.Fprintf(x.w, s, params...) + if err != nil { + panic(err) + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.outf(s, params...) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs/arrays always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) + x.varsfxreset() + + fnSigPfx := "func (" + genTopLevelVarName + " " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + x.out(fnSigPfx) + + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0, true) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + + switch t.Kind() { + case reflect.Ptr: + telem := t.Elem() + tek := telem.Kind() + if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { + x.enc(varname, genNonPtr(t)) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + case reflect.Struct, reflect.Array: + if t == timeTyp { + x.enc(varname, t) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, where t represents T. 
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T +// (to prevent copying), +// else t is of type T +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + // tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T + // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if ti2.cs { // t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if ti2.csp { // tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == timeTyp { + x.linef("} else if !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", varname) + // return + } + if t == rawTyp { + x.linef("} else { z.EncRaw(%s)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%s, e)", varname) + return + } + // only check for extensions if the type is named, and has a packagePath. 
+ var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname if of type *T + if !x.nx && genImportPath(t) != "" && t.Name() != "" { + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + if arrayOrStruct { // varname is of type *T + if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } + if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } + } else { // varname is of type T + if ti2.bm { // t.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname) + } + if ti2.jm { // t.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname) + } else if ti2.tm { // t.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.linef("if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw(z.BytesView(string(%s))) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, string(%s)) }", varname, x.xs, varname) + case reflect.Chan: + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, t, true, true) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line. 
+ // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.linef(`if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw([]byte{}) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, "") }`, x.xs) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { + // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + varname2 := varname + "." + t2.Name + switch t2.Type.Kind() { + case reflect.Struct: + rtid2 := rt2id(t2.Type) + ti2 := x.ti.get(rtid2, t2.Type) + // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) + if ti2.rtid == timeTypId { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagComparable) { + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + break + } + // buf.s("(") + buf.s("false") + for i, n := 0, t2.Type.NumField(); i < n; i++ { + f := t2.Type.Field(i) + if f.PkgPath != "" { // unexported + continue + } + buf.s(" || ") + x.encOmitEmptyLine(f, varname2, buf) + } + //buf.s(")") + case reflect.Bool: + buf.s(varname2) + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + buf.s("len(").s(varname2).s(") != 0") + default: + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) + + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. 
+ + // var nn int + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) + + for j, si := range tisfi { + _ = j + if !si.omitEmpty() { + // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) + x.linef("true, // %s", si.fieldName) + // nn++ + continue + } + var t2 reflect.StructField + var omitline genBuf + { + t2typ := t + varname3 := varname + // go through the loop, record the t2 field explicitly, + // and gather the omit line if embedded in pointers. + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + // do not include actual field in the omit line. + // that is done subsequently (right after - below). + if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { + omitline.s(varname3).s(" != nil && ") + } + } + } + x.encOmitEmptyLine(t2, varname, &omitline) + x.linef("%s, // %s", omitline.v(), si.fieldName) + } + x.line("}") + x.linef("_ = %s", numfieldsvar) + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + // nn = 0 + // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + } + x.line("r.WriteArrayElem()") + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty() { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") + + // emulate EncStructFieldKey + switch ti.keyType { + case valueTypeInt: + x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) + case valueTypeUint: + x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) + case valueTypeFloat: + x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) + default: // string + if si.encNameAsciiAlphaNum { + x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName) + } + x.linef("r.EncodeStringEnc(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) + if si.encNameAsciiAlphaNum { + x.linef("}") + } + } + // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) + x.line("r.WriteMapElemValue()") + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty() { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") + x.line("} else {") + x.line("r.WriteMapEnd()") + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + elemBytes := t.Elem().Kind() == reflect.Uint8 + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) + return + } + if t.Kind() == reflect.Array && elemBytes { + x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) + return + } + i := x.varsfx() + if t.Kind() == reflect.Chan { + type ts struct { + Label, Chan, Slice, Sfx string + } + tm, err := template.New("").Parse(genEncChanTmpl) + if err != nil { + panic(err) + } + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) + err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) + if err != nil { + panic(err) + } + // x.linef("%s = sch%s", varname, i) + if elemBytes { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) + x.line("}") + return + } + varname = "sch" + i + } + + x.line("r.WriteArrayStart(len(" + varname + "))") + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") + + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") + if t.Kind() == reflect.Chan { + x.line("}") + } +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + 
// TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.line("r.WriteMapElemKey()") + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") +} + +func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, + newbuf, nilbuf *genBuf) (t2 reflect.StructField) { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + t2kind := t2typ.Kind() + var nilbufed bool + if si != nil { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + t2kind = t2typ.Kind() + if t2kind != reflect.Ptr { + continue + } + if newbuf != nil { + newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + if nilbuf != nil { + if !nilbufed { + nilbuf.s("if true") + nilbufed = true + } + nilbuf.s(" && ").s(varname3).s(" != nil") + } + } + } + // if t2typ.Kind() == reflect.Ptr { + // varname3 = varname3 + t2.Name + // } + if nilbuf != nil { + if nilbufed { + nilbuf.s(" { ") + } + if nilvar != "" { + nilbuf.s(nilvar).s(" = true") + } else if tk := t2typ.Kind(); tk == reflect.Ptr { + if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { + nilbuf.s(varname3).s(" = nil") + } else { + nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) + } + } else { + nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) + } + if nilbufed { + nilbuf.s("}") + } + } + return t2 +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + var varname2 string + if t.Kind() != reflect.Ptr { + if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { + x.dec(varname, t, false) + } + } else { + if checkNotNil { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + } + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + if checkNotNil { + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + } + // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
+ + if ptrPfx == "" { + x.dec(varname, t, true) + } else { + varname2 = genTempVarPfx + "z" + rand + x.line(varname2 + " := " + ptrPfx + varname) + x.dec(varname2, t, true) + } + } +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { + i := x.varsfx() + + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + + if canBeNil { + var buf genBuf + x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) + x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) + } else { + x.line("// cannot be nil") + } + + x.decVarMain(varname, i, t, checkNotNil) + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + mi := x.varsfx() + // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + // x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if t == timeTyp { + x.linef("} else if !z.DecBasicHandle().TimeNotBuiltin { %s%v = r.DecodeTime()", ptrPfx, varname) + // return + } + if t == rawTyp { + x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) + return + } + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) + return + } + + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configued, before doing the interface conversion + // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + + if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname) + } + if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname) + } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname) + } + + x.line("} else {") + + if x.decTryAssignPrimitive(varname, t, isptr) { + return + } + + switch t.Kind() { + case reflect.Array, reflect.Chan: + x.xtraSM(varname, t, false, isptr) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)", + ptrPfx, varname, ptrPfx, ptrPfx, varname) + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + // no need to create temp variable if isptr, or x.F or x[F] + if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 { + x.decStruct(varname, rtid, t) + } else { + varname2 := genTempVarPfx + "j" + mi + x.line(varname2 + " := &" + varname) + x.decStruct(varname2, rtid, t) + } + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + addrPfx + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + addrPfx + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) 
+ + var ptr string + if isptr { + ptr = "*" + } + switch t.Kind() { + case reflect.Int: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Int8: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Int16: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Int32: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Int64: + x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) + + case reflect.Uint: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Uint8: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Uint16: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Uint32: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Uint64: + x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) + case reflect.Uintptr: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + + case reflect.Float32: + x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) + case reflect.Float64: + x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) + + case reflect.Bool: + x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) + case reflect.String: + x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) + default: + return false + } + return true +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) 
+ funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false, true) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + ti := x.ti.get(rtid, t) + i := x.varsfx() + kName := tpfx + "s" + i + + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") + + // emulate decstructfieldkey + switch ti.keyType { + case valueTypeInt: + x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) + case valueTypeUint: + x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) + case valueTypeFloat: + x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) + default: // string + x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) + } + // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) + + x.line("r.ReadMapElemValue()") + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + x.line("r.ReadArrayElem()") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // varname MUST be a ptr, or a struct field or a slice element. + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) 
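The generated decStruct above dispatches on the stream's container type: a struct may arrive either as a map of field-name/value pairs or, if the type opted in through the _struct field's toarray option, as a plain array of field values. The sketch below (editorial, not part of this diff) shows the two encodings side by side; it assumes the vendored import path github.com/hashicorp/go-msgpack/codec and the codec:",toarray" struct-tag convention.

package main

import (
    "fmt"

    "github.com/hashicorp/go-msgpack/codec"
)

// AsMap uses the default struct encoding: a map of field name -> value.
type AsMap struct {
    A int
    B string
}

// AsArray opts into the array form via the _struct field's toarray option,
// so fields are written positionally, as decStruct's array branch expects.
type AsArray struct {
    _struct bool `codec:",toarray"`
    A       int
    B       string
}

func main() {
    var h codec.MsgpackHandle

    var asMap, asArray []byte
    if err := codec.NewEncoderBytes(&asMap, &h).Encode(AsMap{A: 1, B: "x"}); err != nil {
        panic(err)
    }
    if err := codec.NewEncoderBytes(&asArray, &h).Encode(AsArray{A: 1, B: "x"}); err != nil {
        panic(err)
    }
    fmt.Printf("map-encoded struct:   %x\n", asMap)
    fmt.Printf("array-encoded struct: %x\n", asArray)
}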
+ } + return string(name) + +} + +// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 + s = genStripVendor(s) + } + return +} + +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomNameForType base64encodes the t.String() value in such a way +// that it can be used within a function name. 
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +var genInternalNonZeroValueIdx [5]uint64 +var genInternalNonZeroValueStrs = [2][5]string{ + {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, + {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + genInternalNonZeroValueIdx[0]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity + case "bool": + genInternalNonZeroValueIdx[1]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] + case "string": + genInternalNonZeroValueIdx[2]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] + case "float32", "float64", "float", "double": + genInternalNonZeroValueIdx[3]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] + default: + genInternalNonZeroValueIdx[4]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "if e.h.StringToRaw { ee.EncodeStringBytesRaw(bytesView(" + vname + ")) " + + "} else { ee.EncodeStringEnc(cUTF8, " + vname + ") }" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + // case "symbol": + // return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "uint8": + return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" + case "uint16": + return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" + case "uint32": + return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" + case "uint64": + return "dd.DecodeUint64()" + case "uintptr": + return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "int": + return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" + case "int8": + return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" + case "int16": + return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" + case "int32": + return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" + case "int64": + return "dd.DecodeInt64()" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" + case "float64": + return "dd.DecodeFloat64()" + case 
"bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func genStripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. + const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + // } + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. 
+// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go new file mode 100644 index 00000000000..9ddbe205933 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go new file mode 100644 index 00000000000..c5fcd6697ef --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go new file mode 100644 index 00000000000..bc39d6b719f --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go new file mode 100644 index 00000000000..cde4cd37251 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
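genInternalGoFile above renders a template and then runs the result through go/format, writing the unformatted output when formatting fails so the problem stays visible. A small standalone sketch of that render-then-gofmt pattern follows (hypothetical template and names, for illustration only, not part of this diff).

package main

import (
    "bytes"
    "go/format"
    "os"
    "text/template"
)

// Render Go source from a template, gofmt it with go/format, and fall back
// to the raw output if formatting fails, so the problem remains inspectable.
func main() {
    const tmpl = `package demo

func Add(a, b int) int { return a + b }
`
    t := template.Must(template.New("demo").Parse(tmpl))

    var out bytes.Buffer
    if err := t.Execute(&out, nil); err != nil {
        panic(err)
    }

    formatted, err := format.Source(out.Bytes())
    if err != nil {
        os.Stdout.Write(out.Bytes()) // unformatted, but still visible
        return
    }
    os.Stdout.Write(formatted)
}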
+ +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go new file mode 100644 index 00000000000..794133a3cbb --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = false diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go new file mode 100644 index 00000000000..fd92ede3558 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = true diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 00000000000..8debfa61371 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. +// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go new file mode 100644 index 00000000000..0f1bb01e5a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go new file mode 100644 index 00000000000..2fb4b057ddc --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.6,!go1.7 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go new file mode 100644 index 00000000000..c5b81550539 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +const genCheckVendor = true diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go new file mode 100644 index 00000000000..837cf240bae --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go index 7da3955edc9..228ad93d159 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -1,68 +1,200 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec // Contains code shared by both encode and decode. +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. 
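The container model described in the comment block above maps directly onto ordinary Go values: a map encodes as an associative (key-value) container, while a slice encodes as a regular indexed array. A brief illustrative sketch (not part of this diff), assuming the vendored package's import path and its NewEncoderBytes/MsgpackHandle API:

package main

import (
    "fmt"

    "github.com/hashicorp/go-msgpack/codec"
)

// A Go map becomes an associative container (key-value pairs) in the stream,
// while a Go slice becomes a regular array container (indexed elements).
func main() {
    var h codec.MsgpackHandle

    var asMap []byte
    if err := codec.NewEncoderBytes(&asMap, &h).Encode(map[string]int{"a": 1}); err != nil {
        panic(err)
    }

    var asArray []byte
    if err := codec.NewEncoderBytes(&asArray, &h).Encode([]int{1, 2, 3}); err != nil {
        panic(err)
    }

    fmt.Printf("map container:   %x\n", asMap)
    fmt.Printf("array container: %x\n", asArray)
}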
+// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + import ( + "bytes" + "encoding" "encoding/binary" + "errors" "fmt" + "io" "math" "reflect" "sort" + "strconv" "strings" "sync" + "sync/atomic" "time" - "unicode" - "unicode/utf8" ) const ( - structTagName = "codec" + scratchByteArrayLen = 32 + // initCollectionCap = 16 // 32 is defensive. 16 is preferred. 
- // Support - // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) - // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // Support encoding.(Binary|Text)(Unm|M)arshaler. // This constant flag will enable or disable it. - supportBinaryMarshal = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - useMapForCodecCache = false - - // For some common container types, we can short-circuit an elaborate - // reflection dance and call encode/decode directly. - // The currently supported types are: - // - slices of strings, or id's (int64,uint64) or interfaces. - // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf - shortCircuitReflectToFastPath = true + supportMarshalInterfaces = true // for debugging, set this to false, to catch panic traces. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false + // arrayCacheLen is the length of the cache used in encoder or decoder for + // allowing zero-alloc initialization. + // arrayCacheLen = 8 + + // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 + // should use "runtime/internal/sys".CacheLineSize, but that is not exposed. + cacheLineSize = 64 - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false + wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize + wordSize = wordSizeBits / 8 + + // so structFieldInfo fits into 8 bytes + maxLevelsEmbedding = 14 + + // useFinalizers=true configures finalizers to release pool'ed resources + // acquired by Encoder/Decoder during their GC. + // + // Note that calling SetFinalizer is always expensive, + // as code must be run on the systemstack even for SetFinalizer(t, nil). + // + // We document that folks SHOULD call Release() when done, or they can + // explicitly call SetFinalizer themselves e.g. + // runtime.SetFinalizer(e, (*Encoder).Release) + // runtime.SetFinalizer(d, (*Decoder).Release) + useFinalizers = false ) +var oneByteArr [1]byte +var zeroByteSlice = oneByteArr[:0:0] + +var codecgen bool + +var refBitset bitset256 +var pool pooler +var panicv panicHdl + +func init() { + pool.init() + + refBitset.set(byte(reflect.Map)) + refBitset.set(byte(reflect.Ptr)) + refBitset.set(byte(reflect.Func)) + refBitset.set(byte(reflect.Chan)) +} + +type clsErr struct { + closed bool // is it closed? 
+ errClosed error // error on closing +} + +// type entryType uint8 + +// const ( +// entryTypeBytes entryType = iota // make this 0, so a comparison is cheap +// entryTypeIo +// entryTypeBufio +// entryTypeUnset = 255 +// ) + type charEncoding uint8 const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE + _ charEncoding = iota // make 0 unset + cUTF8 + cUTF16LE + cUTF16BE + cUTF32LE + cUTF32BE + // Deprecated: not a true char encoding value + cRAW charEncoding = 255 ) // valueType is the stream type @@ -80,517 +212,2508 @@ const ( valueTypeBytes valueTypeMap valueTypeArray - valueTypeTimestamp + valueTypeTime valueTypeExt - valueTypeInvalid = 0xff + // valueTypeInvalid = 0xff +) + +var valueTypeStrings = [...]string{ + "Unset", + "Nil", + "Int", + "Uint", + "Float", + "Bool", + "String", + "Symbol", + "Bytes", + "Map", + "Array", + "Timestamp", + "Ext", +} + +func (x valueType) String() string { + if int(x) < len(valueTypeStrings) { + return valueTypeStrings[x] + } + return strconv.FormatInt(int64(x), 10) +} + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArraySend are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already does these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +// type sfiIdx struct { +// name string +// index int +// } + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// - even Java's PMD rules set TooManyFields threshold to 15. +// However, go has embedded fields, which should be regarded as +// top level, allowing structs to possibly double or triple. +// In addition, we don't want to keep creating transient arrays, +// especially for the sfi index tracking, and the evtypes tracking. 
+// +// So - try to keep typeInfoLoadArray within 2K bytes +const ( + typeInfoLoadArraySfisLen = 16 + typeInfoLoadArraySfiidxLen = 8 * 112 + typeInfoLoadArrayEtypesLen = 12 + typeInfoLoadArrayBLen = 8 * 4 ) +type typeInfoLoad struct { + // fNames []string + // encNames []string + etypes []uintptr + sfis []structFieldInfo +} + +type typeInfoLoadArray struct { + // fNames [typeInfoLoadArrayLen]string + // encNames [typeInfoLoadArrayLen]string + sfis [typeInfoLoadArraySfisLen]structFieldInfo + sfiidx [typeInfoLoadArraySfiidxLen]byte + etypes [typeInfoLoadArrayEtypesLen]uintptr + b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package + +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +type isZeroer interface { + IsZero() bool +} + +type codecError struct { + name string + err interface{} +} + +func (e codecError) Cause() error { + switch xerr := e.err.(type) { + case nil: + return nil + case error: + return xerr + case string: + return errors.New(xerr) + case fmt.Stringer: + return errors.New(xerr.String()) + default: + return fmt.Errorf("%v", e.err) + } +} + +func (e codecError) Error() string { + return fmt.Sprintf("%s error: %v", e.name, e.err) +} + +// type byteAccepter func(byte) bool + var ( bigen = binary.BigEndian structInfoFieldName = "_struct" - cachedTypeInfo = make(map[uintptr]*typeInfo, 4) - cachedTypeInfoMutex sync.RWMutex - - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - strSliceTyp = reflect.TypeOf([]string(nil)) - boolSliceTyp = reflect.TypeOf([]bool(nil)) - uintSliceTyp = reflect.TypeOf([]uint(nil)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uint16SliceTyp = reflect.TypeOf([]uint16(nil)) - uint32SliceTyp = reflect.TypeOf([]uint32(nil)) - uint64SliceTyp = reflect.TypeOf([]uint64(nil)) - intSliceTyp = reflect.TypeOf([]int(nil)) - int8SliceTyp = reflect.TypeOf([]int8(nil)) - int16SliceTyp = reflect.TypeOf([]int16(nil)) - int32SliceTyp = reflect.TypeOf([]int32(nil)) - int64SliceTyp = reflect.TypeOf([]int64(nil)) - float32SliceTyp = reflect.TypeOf([]float32(nil)) - float64SliceTyp = reflect.TypeOf([]float64(nil)) - - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) - - mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) - mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) - mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) - mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() - - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() - - boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() - uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - uint16SliceTypId = 
reflect.ValueOf(uint16SliceTyp).Pointer() - uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() - uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() - intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() - int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() - int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() - int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() - int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() - float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() - float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() - - mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() - mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() - mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() - mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() - // Id = reflect.ValueOf().Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() - binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uintptrTyp = reflect.TypeOf(uintptr(0)) + uint8Typ = reflect.TypeOf(uint8(0)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uintTyp = reflect.TypeOf(uint(0)) + intTyp = reflect.TypeOf(int(0)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + + uint8TypId = rt2id(uint8Typ) + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize = uint8(intTyp.Bits()) + uintBitsize = uint8(uintTyp.Bits()) + + // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") ) -type binaryUnmarshaler interface { - UnmarshalBinary(data []byte) error +var defTypeInfos = NewTypeInfos([]string{"codec", 
"json"}) + +var immutableKindsSet = [32]bool{ + // reflect.Invalid: , + reflect.Bool: true, + reflect.Int: true, + reflect.Int8: true, + reflect.Int16: true, + reflect.Int32: true, + reflect.Int64: true, + reflect.Uint: true, + reflect.Uint8: true, + reflect.Uint16: true, + reflect.Uint32: true, + reflect.Uint64: true, + reflect.Uintptr: true, + reflect.Float32: true, + reflect.Float64: true, + reflect.Complex64: true, + reflect.Complex128: true, + // reflect.Array + // reflect.Chan + // reflect.Func: true, + // reflect.Interface + // reflect.Map + // reflect.Ptr + // reflect.Slice + reflect.String: true, + // reflect.Struct + // reflect.UnsafePointer +} + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +// +// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself. +// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error. +// For example, the snippet below will cause such an error. +// type testSelferRecur struct{} +// func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) } +// func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) } +// +// Note: *the first set of bytes of any value MUST NOT represent nil in the format*. +// This is because, during each decode, we first check the the next set of bytes +// represent nil, and if so, we just set the value to nil. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) } -type binaryMarshaler interface { - MarshalBinary() (data []byte, err error) +// MissingFielder defines the interface allowing structs to internally decode or encode +// values which do not map to struct fields. +// +// We expect that this interface is bound to a pointer type (so the mutation function works). +// +// A use-case is if a version of a type unexports a field, but you want compatibility between +// both versions during encoding and decoding. +// +// Note that the interface is completely ignored during codecgen. +type MissingFielder interface { + // CodecMissingField is called to set a missing field and value pair. + // + // It returns true if the missing field was set on the struct. + CodecMissingField(field []byte, value interface{}) bool + + // CodecMissingFields returns the set of fields which are not struct fields + CodecMissingFields() map[string]interface{} } -// MapBySlice represents a slice which should be encoded as a map in the stream. +// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream. // The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. 
+// +// Example usage: +// type T1 []string // or []int or []Point or any other "slice" type +// func (_ T1) MapBySlice{} // T1 now implements MapBySlice, and will be encoded as a map +// type T2 struct { KeyValues T1 } +// +// var kvs = []string{"one", "1", "two", "2", "three", "3"} +// var v2 = T2{ KeyValues: T1(kvs) } +// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} } +// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice type MapBySlice interface { MapBySlice() } -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// // BasicHandle encapsulates the common options and extension functions. +// +// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. type BasicHandle struct { + // BasicHandle is always a part of a different type. + // It doesn't have to fit into it own cache lines. + + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls). + // If *[]T is used instead, this becomes comparable, at the cost of extra indirection. + // Thses slices are used all the time, so keep as slices (not pointers). + extHandle - EncodeOptions - DecodeOptions -} -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - writeExt() bool - getBasicHandle() *BasicHandle - newEncDriver(w encWriter) encDriver - newDecDriver(r decReader) decDriver -} + intf2impls -// RawExt represents raw unprocessed extension data. -type RawExt struct { - Tag byte - Data []byte -} + inited uint32 + _ uint32 // padding -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag byte - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} + // ---- cache line -type extHandle []*extTypeTagFn + RPCOptions -// AddExt registers an encode and decode function for a reflect.Type. -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. -func (o *extHandle) AddExt( - rt reflect.Type, - tag byte, - encfn func(reflect.Value) ([]byte, error), - decfn func(reflect.Value, []byte) error, -) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } + // TimeNotBuiltin configures whether time.Time should be treated as a builtin type. + // + // All Handlers should know how to encode/decode time.Time as part of the core + // format specification, or as a standard extension defined by the format. + // + // However, users can elect to handle time.Time as a custom extension, or via the + // standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface. 
+ // To elect this behavior, users can set TimeNotBuiltin=true. + // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior + // (for Cbor and Msgpack), where time.Time was not a builtin supported type. + TimeNotBuiltin bool + + // ExplicitRelease configures whether Release() is implicitly called after an encode or + // decode call. + // + // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...) + // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer, + // then you do not want it to be implicitly closed after each Encode/Decode call. + // Doing so will unnecessarily return resources to the shared pool, only for you to + // grab them right after again to do another Encode/Decode call. + // + // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when + // you are truly done. + // + // As an alternative, you can explicitly set a finalizer - so its resources + // are returned to the shared pool before it is garbage-collected. Do it as below: + // runtime.SetFinalizer(e, (*Encoder).Release) + // runtime.SetFinalizer(d, (*Decoder).Release) + ExplicitRelease bool - // o cannot be nil, since it is always embedded in a Handle. - // if nil, let it panic. - // if o == nil { - // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") - // return - // } + be bool // is handle a binary encoding? + js bool // is handle javascript handler? + n byte // first letter of handle name + _ uint16 // padding - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.encFn, v.decFn = tag, encfn, decfn - return - } - } + // ---- cache line - *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) - return -} + DecodeOptions -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - for _, v := range o { - if v.rtid == rtid { - return v - } - } - return nil -} + // ---- cache line -func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { - for _, v := range o { - if v.tag == tag { - return v - } - } - return nil -} + EncodeOptions -func (o extHandle) getDecodeExtForTag(tag byte) ( - rv reflect.Value, fn func(reflect.Value, []byte) error) { - if x := o.getExtForTag(tag); x != nil { - // ext is only registered for base - rv = reflect.New(x.rt).Elem() - fn = x.decFn - } - return + // noBuiltInTypeChecker + + rtidFns atomicRtidFnSlice + mu sync.Mutex + // r []uintptr // rtids mapped to s above } -func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.decFn +// basicHandle returns an initialized BasicHandle from the Handle. +func basicHandle(hh Handle) (x *BasicHandle) { + x = hh.getBasicHandle() + // ** We need to simulate once.Do, to ensure no data race within the block. + // ** Consequently, below would not work. + // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { + // x.be = hh.isBinary() + // _, x.js = hh.(*JsonHandle) + // x.n = hh.Name()[0] + // } + + // simulate once.Do using our own stored flag and mutex as a CompareAndSwap + // is not sufficient, since a race condition can occur within init(Handle) function. + // init is made noinline, so that this function can be inlined by its caller. 
+ if atomic.LoadUint32(&x.inited) == 0 { + x.init(hh) } return } -func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.encFn +//go:noinline +func (x *BasicHandle) init(hh Handle) { + // make it uninlineable, as it is called at most once + x.mu.Lock() + if x.inited == 0 { + x.be = hh.isBinary() + _, x.js = hh.(*JsonHandle) + x.n = hh.Name()[0] + atomic.StoreUint32(&x.inited, 1) } - return + x.mu.Unlock() } -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} - // tag string // tag - // name string // field name - // encNameBs []byte // encoded name as byte stream - // ikind int // kind of the field as an int i.e. int(reflect.Kind) +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) } -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - if fname == "" { - panic("parseStructFieldInfo: No Field Name") +func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP } - si := structFieldInfo{ - // name: fname, - encName: fname, - // tag: stag, + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn } + return +} - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s +func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + sp := x.rtidFns.load() + if sp != nil { + if _, fn = findFn(sp, rtid); fn != nil { + // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + return + } + } + c := x + // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + fn = new(codecFn) + fi := &(fn.i) + ti := c.getTypeInfo(rtid, rt) + fi.ti = ti + + rk := reflect.Kind(ti.kind) + + if checkCodecSelfer && (ti.cs || ti.csp) { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + fi.addrF = true + fi.addrD = ti.csp + fi.addrE = ti.csp + } else if rtid == timeTypId && !c.TimeNotBuiltin { + fn.fe = (*Encoder).kTime + fn.fd = (*Decoder).kTime + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fi.addrF = true + fi.addrD = true + fi.addrE = true + } else if xfFn := c.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fi.addrF = true + fi.addrD = true + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + fi.addrF = true + fi.addrD = ti.bup + fi.addrE = ti.bmp + } else if 
supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + fi.addrF = true + fi.addrD = ti.jup + fi.addrE = ti.jmp + } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + fi.addrF = true + fi.addrD = ti.tup + fi.addrE = ti.tmp + } else { + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if ti.pkgpath == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fi.addrD = true + fi.addrF = false } } else { - switch s { - case "omitempty": - si.omitEmpty = true - case "toarray": - si.toArray = true + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(ti.key, ti.elem) + } else { + rtu = reflect.SliceOf(ti.elem) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + fi.addrF = false // meaning it can be an address(ptr) or a value + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } else { + xfnf2(d, xf, xrv.Convert(xrt)) + } + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt8 + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt16 + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt32 + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt64 + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint8 + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint16 + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint32 + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint64 + fn.fd = (*Decoder).kUint64 + case reflect.Uintptr: + fn.fe = (*Encoder).kUintptr + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + fn.fd = (*Decoder).kErr + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrF = false + fi.addrD = false + rt2 := reflect.SliceOf(ti.elem) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty || ti.mf || ti.mfp { + fn.fe = (*Encoder).kStruct + } 
else { + fn.fe = (*Encoder).kStructNoOmitempty } + fn.fd = (*Decoder).kStruct + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + fn.fe = (*Encoder).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr } } } - // si.encNameBs = []byte(si.encName) - return &si -} -type sfiSortedByEncName []*structFieldInfo + c.mu.Lock() + var sp2 []codecRtidFn + sp = c.rtidFns.load() + if sp == nil { + sp2 = []codecRtidFn{{rtid, fn}} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) + } else { + idx, fn2 := findFn(sp, rtid) + if fn2 == nil { + sp2 = make([]codecRtidFn, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = codecRtidFn{rtid, fn} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) -func (p sfiSortedByEncName) Len() int { - return len(p) + } + } + c.mu.Unlock() + return } -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName +// Handle defines a specific encoding format. It also stores any runtime state +// used during an Encoding or Decoding session e.g. stored state about Types, etc. +// +// Once a handle is configured, it can be shared across multiple Encoders and Decoders. +// +// Note that a Handle is NOT safe for concurrent modification. +// Consequently, do not modify it after it is configured if shared among +// multiple Encoders and Decoders in different goroutines. +// +// Consequently, the typical usage model is that a Handle is pre-configured +// before first time use, and not modified while in use. +// Such a pre-configured Handle is safe for concurrent access. +type Handle interface { + Name() string + // return the basic handle. It may not have been inited. + // Prefer to use basicHandle() helper function that ensures it has been inited. + getBasicHandle() *BasicHandle + recreateEncDriver(encDriver) bool + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + // IsBuiltinType(rtid uintptr) bool } -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and retrieve the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour +// behind an Encode flag which must be explicitly set. +type Raw []byte -// typeInfo keeps information about each type referenced in the encode/decode sequence. +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt +// if there is no registered extension for the tag. // -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. +// Only one of Data or Value is nil. +// If Data is nil, then the content of the RawExt is in the Value. 
+type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which leverage the format to do + // custom serialization of the types. + Value interface{} +} - rt reflect.Type - rtid uintptr +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + WriteExt(v interface{}) []byte - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - m bool // base type (T or *T) is a binaryMarshaler - unm bool // base type (T or *T) is a binaryUnmarshaler - mIndir int8 // number of indirections to get to binaryMarshaler type - unmIndir int8 // number of indirections to get to binaryUnmarshaler type - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 + // ReadExt updates a value from a []byte. + // + // Note: dst is always a pointer kind to the registered extension type. + ReadExt(dst interface{}, src []byte) } -func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - cachedTypeInfoMutex.RLock() - pti, ok = cachedTypeInfo[rtid] - cachedTypeInfoMutex.RUnlock() - if ok { - return - } +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding + // e.g. convert time.Time to int64. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + ConvertExt(v interface{}) interface{} - cachedTypeInfoMutex.Lock() - defer cachedTypeInfoMutex.Unlock() - if pti, ok = cachedTypeInfo[rtid]; ok { - return - } + // UpdateExt updates a value from a simpler interface for easy decoding + // e.g. convert int64 to time.Time. + // + // Note: dst is always a pointer kind to the registered extension type. + UpdateExt(dst interface{}, src interface{}) +} - ti := typeInfo{rt: rt, rtid: rtid} - pti = &ti +// Ext handles custom (de)serialization of custom types / extensions. 
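+//
+// Illustrative, hedged sketch only (the myTime wrapper type, its T field and the chosen tag are
+// assumptions made up for this comment, not part of this package): the InterfaceExt half of an
+// Ext for a hypothetical time wrapper could look like
+//
+//	type unixTimeExt struct{}
+//
+//	func (unixTimeExt) ConvertExt(v interface{}) interface{} {
+//		return v.(*myTime).T.Unix() // v is a pointer, since myTime is a struct kind
+//	}
+//	func (unixTimeExt) UpdateExt(dst interface{}, src interface{}) {
+//		dst.(*myTime).T = time.Unix(src.(int64), 0)
+//	}
+//
+// and would then be registered against the wrapper type and a chosen tag on a handle that
+// supports it (e.g. via a SetInterfaceExt method, as the Deprecated notes on AddExt/SetExt
+// further below suggest).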
+type Ext interface { + BytesExt + InterfaceExt +} - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.m, ti.mIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.unm, ti.unmIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) - - // // try to put all si close together - // const tryToPutAllStructFieldInfoTogether = true - // if tryToPutAllStructFieldInfoTogether { - // sfip2 := make([]structFieldInfo, len(sfip)) - // for i, si := range sfip { - // sfip2[i] = *si - // } - // for i := range sfip { - // sfip[i] = &sfip2[i] - // } - // } - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) - } - // sfi = sfip - cachedTypeInfo[rtid] = pti - return + return bs } -func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - // for rt.Kind() == reflect.Ptr { - // // indexstack = append(indexstack, 0) - // rt = rt.Elem() - // } - for j := 0; j < rt.NumField(); j++ { - f := rt.Field(j) - stag := f.Tag.Get(structTagName) - if stag == "-" { - continue - } - if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - continue - } - // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. - if f.Anonymous && stag == "" { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - // do not let fields with same name in embedded structs override field at higher level. 
- // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - si := parseStructFieldInfo(f.Name, stag) - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) } } -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) } -func doPanic(tag string, format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = tag - copy(params2[1:], params) - panic(fmt.Errorf("%s: "+format, params2...)) +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) } -func checkOverflowFloat32(f float64, doCheck bool) { - if !doCheck { - return - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() - f2 := f - if f2 < 0 { - f2 = -f - } - if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { - decErr("Overflow float32 value: %v", f2) - } +type extWrapper struct { + BytesExt + InterfaceExt } -func checkOverflow(ui uint64, i int64, bitsize uint8) { - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize == 0 { +type bytesExtFailer struct{} + +func (bytesExtFailer) WriteExt(v interface{}) []byte { + panicv.errorstr("BytesExt.WriteExt is not supported") + return nil +} +func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { + panicv.errorstr("BytesExt.ReadExt is not supported") +} + +type interfaceExtFailer struct{} + +func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { + panicv.errorstr("InterfaceExt.ConvertExt is not supported") + return nil +} +func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { + panicv.errorstr("InterfaceExt.UpdateExt is not supported") +} + +type binaryEncodingType struct{} + +func (binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +// type noBuiltInTypeChecker struct{} +// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } +// type noBuiltInTypes struct{ noBuiltInTypeChecker } + +type noBuiltInTypes struct{} + +func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (noStreamingCodec) CheckBreak() bool { return false } +// func (noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (noElemSeparators) hasElemSeparators() (v bool) { return } +func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. 
+ w *encWriterSwitch +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rtidptr uintptr + rt reflect.Type + tag uint64 + ext Ext + _ [1]uint64 // padding +} + +type extHandle []extTypeTagFn + +// AddExt registes an encode and decode function for a reflect.Type. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) AddExt(rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// SetExt will set the extension for a tag and reflect.Type. +// Note that the type must be a named type, and specifically not a pointer or Interface. +// An error is returned if that is not honored. +// To Deregister an ext, call SetExt with nil Ext. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + rk := rt.Kind() + for rk == reflect.Ptr { + rt = rt.Elem() + rk = rt.Kind() + } + + if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr { + return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt) + } + + rtid := rt2id(rt) + switch rtid { + case timeTypId, rawTypId, rawExtTypId: + // all natively supported type, so cannot have an extension + return // TODO: should we silently ignore, or return an error??? + } + // if o == nil { + // return errors.New("codec.Handle.SetExt: extHandle not initialized") + // } + o2 := *o + // if o2 == nil { + // return errors.New("codec.Handle.SetExt: extHandle not initialized") + // } + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + rtidptr := rt2id(reflect.PtrTo(rt)) + *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}}) + return +} + +func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.rtid == rtid || v.rtidptr == rtid { + return + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.tag == tag { + return + } + } + return nil +} + +type intf2impl struct { + rtid uintptr // for intf + impl reflect.Type + // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned. +} + +type intf2impls []intf2impl + +// Intf2Impl maps an interface to an implementing type. +// This allows us support infering the concrete type +// and populating it when passed an interface. +// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc. +// +// Passing a nil impl will clear the mapping. 
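+//
+// A hedged usage sketch (illustrative only; it assumes the bytes, io and reflect imports and a
+// JsonHandle value, none of which this function requires by itself):
+//
+//	var h JsonHandle
+//	// ask decoding to materialize a *bytes.Buffer wherever an io.Reader is expected
+//	_ = h.Intf2Impl(reflect.TypeOf((*io.Reader)(nil)).Elem(), reflect.TypeOf(&bytes.Buffer{}))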
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { + if impl != nil && !impl.Implements(intf) { + return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) + } + rtid := rt2id(intf) + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.impl = impl + return + } + } + *o = append(o2, intf2impl{rtid, impl}) + return +} + +func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { + for i := range o { + v := &o[i] + if v.rtid == rtid { + if v.impl == nil { + return + } + if v.impl.Kind() == reflect.Ptr { + return reflect.New(v.impl.Elem()) + } + return reflect.New(v.impl).Elem() + } + } + return +} + +type structFieldInfoFlag uint8 + +const ( + _ structFieldInfoFlag = 1 << iota + structFieldInfoFlagReady + structFieldInfoFlagOmitEmpty +) + +func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { + *x = *x | f +} + +func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { + *x = *x &^ f +} + +func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { + return x&f != 0 +} + +func (x structFieldInfoFlag) omitEmpty() bool { + return x.flagGet(structFieldInfoFlagOmitEmpty) +} + +func (x structFieldInfoFlag) ready() bool { + return x.flagGet(structFieldInfoFlagReady) +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + + encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers + structFieldInfoFlag + _ [1]byte // padding +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. 
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { +// v, _ = si.field(v, update) +// return v +// } + +func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { + keytype = valueTypeString // default + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + } else { + switch s { + case "omitempty": + omitEmpty = true + case "toarray": + toArray = true + case "int": + keytype = valueTypeInt + case "uint": + keytype = valueTypeUint + case "float": + keytype = valueTypeFloat + // case "bool": + // keytype = valueTypeBool + case "string": + keytype = valueTypeString + } + } + } + return +} + +func (si *structFieldInfo) parseTag(stag string) { + // if fname == "" { + // panic(errNoFieldNameToStructFieldInfo) + // } + + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.flagSet(structFieldInfoFlagOmitEmpty) + // si.omitEmpty = true + // case "toarray": + // si.toArray = true + } + } + } +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { return len(p) } +func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } +func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 
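+	//
+	// Illustrative walk-through (the Outer/Inner types are assumptions, not code in this
+	// package): for type Inner struct{ B int } embedded as type Outer struct{ Inner },
+	// the promoted field B carries is=[0,0,...] and nis=2, so the nis==2 case below does
+	// x.v.Field(0) to reach the Inner value (resolving pointers via baseStructRv), then
+	// .Field(0) to reach B, caching the intermediate value keyed by its field index.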
+ var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +type typeInfoFlag uint8 + +const ( + typeInfoFlagComparable = 1 << iota + typeInfoFlagIsZeroer + typeInfoFlagIsZeroerPtr +) + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + rt reflect.Type + elem reflect.Type + pkgpath string + + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + kind uint8 + chandir uint8 + + anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice + + // ---- cpu cache line boundary? + sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + key reflect.Type + + // ---- cpu cache line boundary? + // sfis []structFieldInfo // all sfi, in src order, as created. + sfiNamesSort []byte // all names, with indexes into the sfiSort + + // format of marshal type fields below: [btj][mu]p? OR csp? + + bm bool // T is a binaryMarshaler + bmp bool // *T is a binaryMarshaler + bu bool // T is a binaryUnmarshaler + bup bool // *T is a binaryUnmarshaler + tm bool // T is a textMarshaler + tmp bool // *T is a textMarshaler + tu bool // T is a textUnmarshaler + tup bool // *T is a textUnmarshaler + + jm bool // T is a jsonMarshaler + jmp bool // *T is a jsonMarshaler + ju bool // T is a jsonUnmarshaler + jup bool // *T is a jsonUnmarshaler + cs bool // T is a Selfer + csp bool // *T is a Selfer + mf bool // T is a MissingFielder + mfp bool // *T is a MissingFielder + + // other flags, with individual bits representing if set. 
+ flags typeInfoFlag + infoFieldOmitempty bool + + _ [6]byte // padding + _ [2]uint64 // padding +} + +func (ti *typeInfo) isFlag(f typeInfoFlag) bool { + return ti.flags&f != 0 +} + +func (ti *typeInfo) indexForEncName(name []byte) (index int16) { + var sn []byte + if len(name)+2 <= 32 { + var buf [32]byte // should not escape to heap + sn = buf[:len(name)+2] + } else { + sn = make([]byte, len(name)+2) + } + copy(sn[1:], name) + sn[0], sn[len(sn)-1] = tiSep2(name), 0xff + j := bytes.Index(ti.sfiNamesSort, sn) + if j < 0 { + return -1 + } + index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8) + return +} + +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected + infos atomicTypeInfoSlice + mu sync.Mutex + tags []string + _ [2]uint64 // padding +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // if sp == nil { + // return -1, nil + // } + // s := *sp + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + ti = s[i].ti + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + sp := x.infos.load() + if sp != nil { + _, pti = findTypeInfo(sp, rtid) + if pti != nil { + return + } + } + + rk := rt.Kind() + + if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { + panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. 
+ ti := typeInfo{ + rt: rt, + rtid: rtid, + kind: uint8(rk), + pkgpath: rt.PkgPath(), + keyType: valueTypeString, // default it - so it's never 0 + } + // ti.rv0 = reflect.Zero(rt) + + // ti.comparable = rt.Comparable() + ti.numMeth = uint16(rt.NumMethod()) + + ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) + ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) + ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) + ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) + ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) + ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) + ti.cs, ti.csp = implIntf(rt, selferTyp) + ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) + + b1, b2 := implIntf(rt, iszeroTyp) + if b1 { + ti.flags |= typeInfoFlagIsZeroer + } + if b2 { + ti.flags |= typeInfoFlagIsZeroerPtr + } + if rt.Comparable() { + ti.flags |= typeInfoFlagComparable + } + + switch rk { + case reflect.Struct: + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) + ti.infoFieldOmitempty = omitEmpty + } else { + ti.keyType = valueTypeString + } + pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.rtid + // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + // ti.sfis = vv.sfis + ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) + pp.Put(pi) + case reflect.Map: + ti.elem = rt.Elem() + ti.key = rt.Key() + case reflect.Slice: + ti.mbs, _ = implIntf(rt, mapBySliceTyp) + ti.elem = rt.Elem() + case reflect.Chan: + ti.elem = rt.Elem() + ti.chandir = uint8(rt.ChanDir()) + case reflect.Array, reflect.Ptr: + ti.elem = rt.Elem() + } + // sfi = sfiSrc + + x.mu.Lock() + sp = x.infos.load() + var sp2 []rtid2ti + if sp == nil { + pti = &ti + sp2 = []rtid2ti{{rtid, pti}} + x.infos.store(sp2) + } else { + var idx uint + idx, pti = findTypeInfo(sp, rtid) + if pti == nil { + pti = &ti + sp2 = make([]rtid2ti, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = rtid2ti{rtid, pti} + x.infos.store(sp2) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. 
+ // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + flen := rt.NumField() + if flen > (1< %v fields are not supported - has %v fields", + (1<= 0; i-- { // bounds-check elimination + b := si.encName[i] + if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') { + continue + } + si.encNameAsciiAlphaNum = false + break + } + si.fieldName = f.Name + si.flagSet(structFieldInfoFlagReady) + + // pv.encNames = append(pv.encNames, si.encName) + + // si.ikind = int(f.Type.Kind()) + if len(indexstack) > maxLevelsEmbedding-1 { + panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth", + maxLevelsEmbedding-1, len(indexstack)) + } + si.nis = uint8(len(indexstack)) + 1 + copy(si.is[:], indexstack) + si.is[len(indexstack)] = j + + if omitEmpty { + si.flagSet(structFieldInfoFlagOmitEmpty) + } + pv.sfis = append(pv.sfis, si) + } +} + +func tiSep(name string) uint8 { + // (xn[0]%64) // (between 192-255 - outside ascii BMP) + // return 0xfe - (name[0] & 63) + // return 0xfe - (name[0] & 63) - uint8(len(name)) + // return 0xfe - (name[0] & 63) - uint8(len(name)&63) + // return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07)) + return 0xfe - (name[0] & 63) - uint8(len(name)&63) +} + +func tiSep2(name []byte) uint8 { + return 0xfe - (name[0] & 63) - uint8(len(name)&63) +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) ( + y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) { + sa := pv.sfiidx[:0] + sn := pv.b[:] + n := len(x) + + var xn string + var ui uint16 + var sep byte + + for i := range x { + ui = uint16(i) + xn = x[i].encName // fieldName or encName? use encName for now. + if len(xn)+2 > cap(pv.b) { + sn = make([]byte, len(xn)+2) + } else { + sn = sn[:len(xn)+2] + } + // use a custom sep, so that misses are less frequent, + // since the sep (first char in search) is as unique as first char in field name. + sep = tiSep(xn) + sn[0], sn[len(sn)-1] = sep, 0xff + copy(sn[1:], xn) + j := bytes.Index(sa, sn) + if j == -1 { + sa = append(sa, sep) + sa = append(sa, xn...) 
+ sa = append(sa, 0xff, byte(ui>>8), byte(ui)) + } else { + index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 + // one of them must be reset to nil, + // and the index updated appropriately to the other one + if x[i].nis == x[index].nis { + } else if x[i].nis < x[index].nis { + sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) + if x[index].ready() { + x[index].flagClr(structFieldInfoFlagReady) + n-- + } + } else { + if x[i].ready() { + x[i].flagClr(structFieldInfoFlagReady) + n-- + } + } + } + + } + var w []structFieldInfo + sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray + if sharingArray { + w = make([]structFieldInfo, n) + } + + // remove all the nils (non-ready) + y = make([]*structFieldInfo, n) + n = 0 + var sslen int + for i := range x { + if !x[i].ready() { + continue + } + if !anyOmitEmpty && x[i].omitEmpty() { + anyOmitEmpty = true + } + if sharingArray { + w[n] = x[i] + y[n] = &w[n] + } else { + y[n] = &x[i] + } + sslen = sslen + len(x[i].encName) + 4 + n++ + } + if n != len(y) { + panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", + rt, len(y), len(x), n) + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + + sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen + if sharingArray { + ss = make([]byte, 0, sslen) + } else { + ss = sa[:0] // reuse the newly made sa array if necessary + } + for i := range z { + xn = z[i].encName + sep = tiSep(xn) + ui = uint16(i) + ss = append(ss, sep) + ss = append(ss, xn...) + ss = append(ss, 0xff, byte(ui>>8), byte(ui)) + } + return +} + +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: +// - does it implement IsZero() bool +// - is it comparable, and can i compare directly using == +// - if checkStruct, then walk through the encodable fields +// and check if they are empty or not. +func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + // v is a struct kind - no need to check again. + // We only check isZero on a struct kind, to reduce the amount of times + // that we lookup the rtid and typeInfo for each type as we walk the tree. + + vt := v.Type() + rtid := rt2id(vt) + if tinfos == nil { + tinfos = defTypeInfos + } + ti := tinfos.get(rtid, vt) + if ti.rtid == timeTypId { + return rv2i(v).(time.Time).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { + return rv2i(v.Addr()).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroer) { + return rv2i(v).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagComparable) { + return rv2i(v) == rv2i(reflect.Zero(vt)) + } + if !checkStruct { + return false + } + // We only care about what we can encode/decode, + // so that is what we use to check omitEmpty. + for _, si := range ti.sfiSrc { + sfv, valid := si.field(v, false) + if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { + return false + } + } + return true +} + +// func roundFloat(x float64) float64 { +// t := math.Trunc(x) +// if math.Abs(x-t) >= 0.5 { +// return t + math.Copysign(1, x) +// } +// return t +// } + +func panicToErr(h errDecorator, err *error) { + // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 
+ // else it seems the recover is not fully handled + if recoverPanicToErr { + if x := recover(); x != nil { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + panicValToErr(h, x, err) + } + } +} + +func panicValToErr(h errDecorator, v interface{}, err *error) { + switch xerr := v.(type) { + case nil: + case error: + switch xerr { + case nil: + case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: + // treat as special (bubble up) + *err = xerr + default: + h.wrapErr(xerr, err) + } + case string: + if xerr != "" { + h.wrapErr(xerr, err) + } + case fmt.Stringer: + if xerr != nil { + h.wrapErr(xerr, err) + } + default: + h.wrapErr(v, err) + } +} + +func isImmutableKind(k reflect.Kind) (v bool) { + // return immutableKindsSet[k] + // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint + return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addrD bool + addrF bool // if addrD, this says whether decode function can take a value or a ptr + addrE bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) + _ [1]uint64 // padding +} + +type codecRtidFn struct { + rtid uintptr + fn *codecFn +} + +// ---- + +// these "checkOverflow" functions must be inlinable, and not call anybody. +// Overflow means that the value cannot be represented without wrapping/overflow. +// Overflow=false does not mean that the value can be represented without losing precision +// (especially for floating point). + +type checkOverflow struct{} + +// func (checkOverflow) Float16(f float64) (overflow bool) { +// panicv.errorf("unimplemented") +// if f < 0 { +// f = -f +// } +// return math.MaxFloat32 < f && f <= math.MaxFloat64 +// } + +func (checkOverflow) Float32(v float64) (overflow bool) { + if v < 0 { + v = -v + } + return math.MaxFloat32 < v && v <= math.MaxFloat64 +} +func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) SignedInt(v uint64) (overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + } + } + return +} + +func (x checkOverflow) Float32V(v float64) float64 { + if x.Float32(v) { + panicv.errorf("float32 overflow: %v", v) + } + return v +} +func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { + if x.Uint(v, bitsize) { + panicv.errorf("uint64 overflow: %v", v) + } + return v +} +func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { + if x.Int(v, bitsize) { + panicv.errorf("int64 overflow: %v", v) + } + return v +} +func (x checkOverflow) SignedIntV(v uint64) int64 { + if x.SignedInt(v) { + panicv.errorf("uint64 to int64 overflow: %v", v) + } + return int64(v) +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type ioFlusher interface { + Flush() error +} + +type ioPeeker interface { + Peek(int) ([]byte, error) +} + +type ioBuffered interface { + Buffered() int +} + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 + +// type uintptrSlice []uintptr +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string + +// type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p uintSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p uintptrSlice) Len() int { return len(p) } +// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +// func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)]) +} +func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p bytesSlice) Len() int { return len(p) } +// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 } +// func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] } +func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// --------------------- + +type sfiRv struct { + v *structFieldInfo + r reflect.Value +} + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv +type timeRv struct { + v time.Time + r reflect.Value +} +type 
timeRvSlice []timeRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p intRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p uintRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v) +} +func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v } +func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p timeRvSlice) Len() int { return len(p) } +func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) } +func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { return } - if i != 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) + if len(x) == 1 { + if x[0] == v { + x[0] = 0 } + return } - if ui != 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return } } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. +// +// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap). 
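+//
+// Worked example (plain arithmetic, using the shift/mask identities spelled out a few
+// lines below): for pos = 77, the byte index is 77>>3 = 9 and the bit within that byte
+// is 77&7 = 5, so isset(77) reduces to x[9] & (1<<5) != 0.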
+// +// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces +// bounds checking, so we discarded them, and everyone uses bitset256. +// +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// func (x *bitset256) issetv(pos byte) byte { +// return x[pos>>3] & (1 << (pos & 7)) +// } + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset256) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +// type bit2set256 [64]byte + +// func (x *bit2set256) set(pos byte, v1, v2 bool) { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// if v1 { +// x[pos>>2] |= 1 << (pos2 + 1) +// } +// if v2 { +// x[pos>>2] |= 1 << pos2 +// } +// } +// func (x *bit2set256) get(pos byte) uint8 { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 +// } + +// ------------ + +type pooler struct { + // function-scoped pooled resources + tiload sync.Pool // for type info loading + sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding + + // lifetime-scoped pooled resources + // dn sync.Pool // for decNaked + buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte +} + +func (p *pooler) init() { + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } + + p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } + p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } + p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } + p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } + p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } + + // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + + p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } + p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } + p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } + p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } + p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } + p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } + p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } + +} + +// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { +// return &p.strRv8, p.strRv8.Get() +// } +// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { +// return &p.strRv16, p.strRv16.Get() +// } +// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { +// return &p.strRv32, p.strRv32.Get() +// } +// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { +// return &p.strRv64, p.strRv64.Get() +// } +// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { +// return &p.strRv128, p.strRv128.Get() +// } + +// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { +// return &p.buf1k, p.buf1k.Get() +// } +// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { +// return &p.buf2k, p.buf2k.Get() +// } +// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { +// return &p.buf4k, p.buf4k.Get() +// } +// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { +// return &p.buf8k, p.buf8k.Get() +// } +// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { +// return &p.buf16k, p.buf16k.Get() +// } 
+// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { +// return &p.buf32k, p.buf32k.Get() +// } +// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) { +// return &p.buf64k, p.buf64k.Get() +// } + +// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { +// return &p.tiload, p.tiload.Get() +// } + +// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { +// return &p.dn, p.dn.Get() +// } + +// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { +// sp := &(p.dn) +// vv := sp.Get() +// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } +// } +// func (p *pooler) decNakedGet() (v interface{}) { +// return p.dn.Get() +// } +// func (p *pooler) tiLoadGet() (v interface{}) { +// return p.tiload.Get() +// } +// func (p *pooler) decNakedPut(v interface{}) { +// p.dn.Put(v) +// } +// func (p *pooler) tiLoadPut(v interface{}) { +// p.tiload.Put(v) +// } + +// ---------------------------------------------------- + +type panicHdl struct{} + +func (panicHdl) errorv(err error) { + if err != nil { + panic(err) + } +} + +func (panicHdl) errorstr(message string) { + if message != "" { + panic(message) + } +} + +func (panicHdl) errorf(format string, params ...interface{}) { + if format == "" { + } else if len(params) == 0 { + panic(format) + } else { + panic(fmt.Sprintf(format, params...)) + } +} + +// ---------------------------------------------------- + +type errDecorator interface { + wrapErr(in interface{}, out *error) +} + +type errDecoratorDef struct{} + +func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } + +// ---------------------------------------------------- + +type must struct{} + +func (must) String(s string, err error) string { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Int(s int64, err error) int64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Uint(s uint64, err error) uint64 { + if err != nil { + panicv.errorv(err) + } + return s } +func (must) Float(s float64, err error) float64 { + if err != nil { + panicv.errorv(err) + } + return s +} + +// ------------------- + +type bytesBufPooler struct { + pool *sync.Pool + poolbuf interface{} +} + +func (z *bytesBufPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } +} + +func (z *bytesBufPooler) get(bufsize int) (buf []byte) { + // ensure an end is called first (if necessary) + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } + + // // Try to use binary search. 
+ // // This is not optimal, as most folks select 1k or 2k buffers + // // so a linear search is better (sequence of if/else blocks) + // if bufsize < 1 { + // bufsize = 0 + // } else { + // bufsize-- + // bufsize /= 1024 + // } + // switch bufsize { + // case 0: + // z.pool, z.poolbuf = pool.bytes1k() + // buf = z.poolbuf.(*[1 * 1024]byte)[:] + // case 1: + // z.pool, z.poolbuf = pool.bytes2k() + // buf = z.poolbuf.(*[2 * 1024]byte)[:] + // case 2, 3: + // z.pool, z.poolbuf = pool.bytes4k() + // buf = z.poolbuf.(*[4 * 1024]byte)[:] + // case 4, 5, 6, 7: + // z.pool, z.poolbuf = pool.bytes8k() + // buf = z.poolbuf.(*[8 * 1024]byte)[:] + // case 8, 9, 10, 11, 12, 13, 14, 15: + // z.pool, z.poolbuf = pool.bytes16k() + // buf = z.poolbuf.(*[16 * 1024]byte)[:] + // case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: + // z.pool, z.poolbuf = pool.bytes32k() + // buf = z.poolbuf.(*[32 * 1024]byte)[:] + // default: + // z.pool, z.poolbuf = pool.bytes64k() + // buf = z.poolbuf.(*[64 * 1024]byte)[:] + // } + // return + + if bufsize <= 1*1024 { + z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() + buf = z.poolbuf.(*[1 * 1024]byte)[:] + } else if bufsize <= 2*1024 { + z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() + buf = z.poolbuf.(*[2 * 1024]byte)[:] + } else if bufsize <= 4*1024 { + z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() + buf = z.poolbuf.(*[4 * 1024]byte)[:] + } else if bufsize <= 8*1024 { + z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() + buf = z.poolbuf.(*[8 * 1024]byte)[:] + } else if bufsize <= 16*1024 { + z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() + buf = z.poolbuf.(*[16 * 1024]byte)[:] + } else if bufsize <= 32*1024 { + z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() + buf = z.poolbuf.(*[32 * 1024]byte)[:] + } else { + z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() + buf = z.poolbuf.(*[64 * 1024]byte)[:] + } + return +} + +// ---------------- + +type sfiRvPooler struct { + pool *sync.Pool + poolv interface{} +} + +func (z *sfiRvPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolv) + z.pool, z.poolv = nil, nil + } +} + +func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) { + if newlen < 0 { // bounds-check-elimination + // cannot happen // here for bounds-check-elimination + } else if newlen <= 8 { + z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8() + fkvs = z.poolv.(*[8]sfiRv)[:newlen] + } else if newlen <= 16 { + z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16() + fkvs = z.poolv.(*[16]sfiRv)[:newlen] + } else if newlen <= 32 { + z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32() + fkvs = z.poolv.(*[32]sfiRv)[:newlen] + } else if newlen <= 64 { + z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64() + fkvs = z.poolv.(*[64]sfiRv)[:newlen] + } else if newlen <= 128 { + z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128() + fkvs = z.poolv.(*[128]sfiRv)[:newlen] + } else { + fkvs = make([]sfiRv, newlen) + } + return +} + +// xdebugf printf. the message in red on the terminal. +// Use it in place of fmt.Printf (which it calls internally) +func xdebugf(pattern string, args ...interface{}) { + var delim string + if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' { + delim = "\n" + } + fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...) 
+} + +// func isImmutableKind(k reflect.Kind) (v bool) { +// return false || +// k == reflect.Int || +// k == reflect.Int8 || +// k == reflect.Int16 || +// k == reflect.Int32 || +// k == reflect.Int64 || +// k == reflect.Uint || +// k == reflect.Uint8 || +// k == reflect.Uint16 || +// k == reflect.Uint32 || +// k == reflect.Uint64 || +// k == reflect.Uintptr || +// k == reflect.Float32 || +// k == reflect.Float64 || +// k == reflect.Bool || +// k == reflect.String +// } + +// func timeLocUTCName(tzint int16) string { +// if tzint == 0 { +// return "UTC" +// } +// var tzname = []byte("UTC+00:00") +// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below. +// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first +// var tzhr, tzmin int16 +// if tzint < 0 { +// tzname[3] = '-' // (TODO: verify. this works here) +// tzhr, tzmin = -tzint/60, (-tzint)%60 +// } else { +// tzhr, tzmin = tzint/60, tzint%60 +// } +// tzname[4] = timeDigits[tzhr/10] +// tzname[5] = timeDigits[tzhr%10] +// tzname[7] = timeDigits[tzmin/10] +// tzname[8] = timeDigits[tzmin%10] +// return string(tzname) +// //return time.FixedZone(string(tzname), int(tzint)*60) +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go index 93f12854f21..0cbd665e257 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -1,132 +1,121 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec // All non-std package dependencies live in this file, // so porting to different environment is easy (just update functions). -import ( - "errors" - "fmt" - "math" - "reflect" -) - -var ( - raisePanicAfterRecover = false - debugging = true -) - -func panicValToErr(panicVal interface{}, err *error) { - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - if raisePanicAfterRecover { - panic(panicVal) +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } } return } -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() +// validate that this function is correct ... 
+// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 } - case reflect.Struct: - if !checkStruct { - return false + // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 } - // return true if all fields are empty. else return false. - - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 } - return true + return (s << 31) | 0x7f800000 | (m << 13) // NaN } - return false + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m } -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) -} +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account -func debugf(format string, args ...interface{}) { - if debugging { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 } -} -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 } - return -} + newCap = x * oldCap / 4 -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return + if num > 0 { + newCap += num } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. 
- for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) } } - return false, 0 + return } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go new file mode 100644 index 00000000000..74987f9f7f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go @@ -0,0 +1,331 @@ +// +build !go1.7 safe appengine + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "time" +) + +const safeMode = true + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unnecessarily incur the cost of reflection this early. 
+ return false +} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +// func rv2rtid(rv reflect.Value) uintptr { +// return reflect.ValueOf(rv.Type()).Pointer() +// } + +func i2rtid(i interface{}) uintptr { + return reflect.ValueOf(reflect.TypeOf(i)).Pointer() +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return v.IsNil() + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + } + return false +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (*ptrToRvMap) init() {} +// func (*ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicClsErr struct { + v atomic.Value +} + +func (x *atomicClsErr) load() (e clsErr) { + if i := x.v.Load(); i != nil { + e = i.(clsErr) + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + x.v.Store(p) +} + +// -------------------------- +type atomicTypeInfoSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { + if i := x.v.Load(); i != nil { + e = i.([]rtid2ti) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +type atomicRtidFnSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { + if i := x.v.Load(); i != nil { + e = i.([]codecRtidFn) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + x.v.Store(p) +} + +// -------------------------- +func (n *decNaked) ru() reflect.Value { + return reflect.ValueOf(&n.u).Elem() +} +func (n *decNaked) ri() reflect.Value { + return reflect.ValueOf(&n.i).Elem() +} +func (n *decNaked) rf() reflect.Value { + return reflect.ValueOf(&n.f).Elem() +} +func (n *decNaked) rl() reflect.Value { + return reflect.ValueOf(&n.l).Elem() +} +func (n *decNaked) rs() reflect.Value { + return reflect.ValueOf(&n.s).Elem() +} +func (n *decNaked) rt() reflect.Value { + return reflect.ValueOf(&n.t).Elem() +} +func (n *decNaked) rb() reflect.Value { + return reflect.ValueOf(&n.b).Elem() +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + rv.Set(reflect.ValueOf(d.d.DecodeTime())) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + rv.SetFloat(fv) +} + +func (d 
*Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat64()) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt64()) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint64()) +} + +// ---------------- + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeTime(rv2i(rv).(time.Time)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + s := rv.String() + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4BytesView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. 
+// func keepAlive4StringView(v []byte) {} + +// func definitelyNil(v interface{}) bool { +// rv := reflect.ValueOf(v) +// switch rv.Kind() { +// case reflect.Invalid: +// return true +// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: +// return rv.IsNil() +// default: +// return false +// } +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go new file mode 100644 index 00000000000..3bc34d90d7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go @@ -0,0 +1,745 @@ +// +build !safe +// +build !appengine +// +build go1.7 + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "time" + "unsafe" +) + +// This file has unsafe variants of some helper methods. +// NOTE: See helper_not_unsafe.go for the usage information. + +// var zeroRTv [4]uintptr + +const safeMode = false +const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go + +type unsafeString struct { + Data unsafe.Pointer + Len int +} + +type unsafeSlice struct { + Data unsafe.Pointer + Len int + Cap int +} + +type unsafeIntf struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +type unsafeReflectValue struct { + typ unsafe.Pointer + ptr unsafe.Pointer + flag uintptr +} + +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len})) +} + +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + sx := (*unsafeString)(unsafe.Pointer(&v)) + return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len})) +} + +func definitelyNil(v interface{}) bool { + // There is no global way of checking if an interface is nil. + // For true references (map, ptr, func, chan), you can just look + // at the word of the interface. However, for slices, you have to dereference + // the word, and get a pointer to the 3-word interface value. + // + // However, the following are cheap calls + // - TypeOf(interface): cheap 2-line call. + // - ValueOf(interface{}): expensive + // - type.Kind: cheap call through an interface + // - Value.Type(): cheap call + // except it's a method value (e.g. r.Read, which implies that it is a Func) + + return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil +} + +func rv2i(rv reflect.Value) interface{} { + // TODO: consider a more generally-known optimization for reflect.Value ==> Interface + // + // Currently, we use this fragile method that taps into implememtation details from + // the source go stdlib reflect/value.go, and trims the implementation. 
+ + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir + var ptr unsafe.Pointer + if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { + ptr = *(*unsafe.Pointer)(urv.ptr) + } else { + ptr = urv.ptr + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +} + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +// func rv2rtid(rv reflect.Value) uintptr { +// return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +// } + +func i2rtid(i interface{}) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ) +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + urv := (*unsafeReflectValue)(unsafe.Pointer(&v)) + if urv.flag == 0 { + return true + } + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.String: + return (*unsafeString)(urv.ptr).Len == 0 + case reflect.Slice: + return (*unsafeSlice)(urv.ptr).Len == 0 + case reflect.Bool: + return !*(*bool)(urv.ptr) + case reflect.Int: + return *(*int)(urv.ptr) == 0 + case reflect.Int8: + return *(*int8)(urv.ptr) == 0 + case reflect.Int16: + return *(*int16)(urv.ptr) == 0 + case reflect.Int32: + return *(*int32)(urv.ptr) == 0 + case reflect.Int64: + return *(*int64)(urv.ptr) == 0 + case reflect.Uint: + return *(*uint)(urv.ptr) == 0 + case reflect.Uint8: + return *(*uint8)(urv.ptr) == 0 + case reflect.Uint16: + return *(*uint16)(urv.ptr) == 0 + case reflect.Uint32: + return *(*uint32)(urv.ptr) == 0 + case reflect.Uint64: + return *(*uint64)(urv.ptr) == 0 + case reflect.Uintptr: + return *(*uintptr)(urv.ptr) == 0 + case reflect.Float32: + return *(*float32)(urv.ptr) == 0 + case reflect.Float64: + return *(*float64)(urv.ptr) == 0 + case reflect.Interface: + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Ptr: + // isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type) + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + case reflect.Map, reflect.Array, reflect.Chan: + return v.Len() == 0 + } + return false +} + +// -------------------------- + +// atomicXXX is expected to be 2 words (for symmetry with atomic.Value) +// +// Note that we do not atomically load/store length and data pointer separately, +// as this could lead to some races. Instead, we atomically load/store cappedSlice. +// +// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly. 
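A minimal standalone sketch of the pattern the note above describes, with an invented atomicIntSlice type standing in for the real atomicXXX types: the whole slice header is published through a single unsafe.Pointer, so readers never observe a torn {data, len, cap} triple.

// Copy-on-write slice published via one atomically swapped pointer.
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type atomicIntSlice struct {
	v unsafe.Pointer // *[]int
}

func (a *atomicIntSlice) load() (s []int) {
	if p := atomic.LoadPointer(&a.v); p != nil {
		s = *(*[]int)(p)
	}
	return
}

func (a *atomicIntSlice) store(s []int) {
	atomic.StorePointer(&a.v, unsafe.Pointer(&s))
}

func main() {
	var a atomicIntSlice
	a.store([]int{1, 2, 3})
	// A writer replaces the slice wholesale; readers pick up either the old
	// or the new header, never a mix of the two.
	next := append(a.load(), 4)
	a.store(next)
	fmt.Println(a.load()) // [1 2 3 4]
}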
+ +// ---------------------- +type atomicTypeInfoSlice struct { + v unsafe.Pointer // *[]rtid2ti + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicTypeInfoSlice) load() (s []rtid2ti) { + x2 := atomic.LoadPointer(&x.v) + if x2 != nil { + s = *(*[]rtid2ti)(x2) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- +type atomicRtidFnSlice struct { + v unsafe.Pointer // *[]codecRtidFn + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicRtidFnSlice) load() (s []codecRtidFn) { + x2 := atomic.LoadPointer(&x.v) + if x2 != nil { + s = *(*[]codecRtidFn)(x2) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- +type atomicClsErr struct { + v unsafe.Pointer // *clsErr + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicClsErr) load() (e clsErr) { + x2 := (*clsErr)(atomic.LoadPointer(&x.v)) + if x2 != nil { + e = *x2 + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- + +// to create a reflect.Value for each member field of decNaked, +// we first create a global decNaked, and create reflect.Value +// for them all. +// This way, we have the flags and type in the reflect.Value. +// Then, when a reflect.Value is called, we just copy it, +// update the ptr to the decNaked's, and return it. + +type unsafeDecNakedWrapper struct { + decNaked + ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above +} + +func (n *unsafeDecNakedWrapper) init() { + n.ru = reflect.ValueOf(&n.u).Elem() + n.ri = reflect.ValueOf(&n.i).Elem() + n.rf = reflect.ValueOf(&n.f).Elem() + n.rl = reflect.ValueOf(&n.l).Elem() + n.rs = reflect.ValueOf(&n.s).Elem() + n.rt = reflect.ValueOf(&n.t).Elem() + n.rb = reflect.ValueOf(&n.b).Elem() + // n.rr[] = reflect.ValueOf(&n.) 
+} + +var defUnsafeDecNakedWrapper unsafeDecNakedWrapper + +func init() { + defUnsafeDecNakedWrapper.init() +} + +func (n *decNaked) ru() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.ru + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u) + return +} +func (n *decNaked) ri() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.ri + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i) + return +} +func (n *decNaked) rf() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rf + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f) + return +} +func (n *decNaked) rl() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rl + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l) + return +} +func (n *decNaked) rs() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rs + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s) + return +} +func (n *decNaked) rt() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rt + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t) + return +} +func (n *decNaked) rb() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rb + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b) + return +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*time.Time)(urv.ptr) = d.d.DecodeTime() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat64() +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt64() +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + 
*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint64() +} + +// ------------ + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeBool(*(*bool)(v.ptr)) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeTime(*(*time.Time)(v.ptr)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + s := *(*string)(v.ptr) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat64(*(*float64)(v.ptr)) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat32(*(*float32)(v.ptr)) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int)(v.ptr))) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int8)(v.ptr))) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int16)(v.ptr))) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int32)(v.ptr))) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int64)(v.ptr))) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint)(v.ptr))) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint8)(v.ptr))) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint16)(v.ptr))) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint32)(v.ptr))) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint64)(v.ptr))) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uintptr)(v.ptr))) +} + +// ------------ + +// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { +// urv := 
(*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // if urv.flag&unsafeFlagIndir != 0 { +// // urv.ptr = *(*unsafe.Pointer)(urv.ptr) +// // } +// *(*[]byte)(urv.ptr) = d.rawBytes() +// } + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir +// var ptr unsafe.Pointer +// // kk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { +// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { +// ptr = *(*unsafe.Pointer)(urv.ptr) +// } else { +// ptr = urv.ptr +// } +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// func definitelyNil(v interface{}) bool { +// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) +// if ui.word == nil { +// return true +// } +// var tk = reflect.TypeOf(v).Kind() +// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil +// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", +// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) +// } + +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 
+// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). +// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? +// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. + +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = 
(int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, +// &m0, +// i0, +// &i0, +// &staticI0, +// &staticM0, +// []uint32{6, 7, 8}, +// "abc", +// Raw{}, +// RawExt{}, +// &Raw{}, +// &RawExt{}, +// unsafe.Pointer(&i0), +// } { +// i2 := rv2i(reflect.ValueOf(i)) +// eq := reflect.DeepEqual(i, i2) +// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) +// } +// // os.Exit(0) +// } + +// func init() { +// staticRv2iTest() +// } + +// func rv2i(rv reflect.Value) interface{} { +// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { +// return rv.Interface() +// } +// // var i interface{} +// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// var ui unsafeIntf +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) +// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { +// if urv.flag&unsafeRvFlagAddr != 0 { +// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") +// return rv.Interface() +// } +// println("****** indirect type/kind") +// ui.word = urv.ptr +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// println("****** unsafe rv flag indir") +// ui.word = *(*unsafe.Pointer)(urv.ptr) +// } else { +// println("****** default: assign prt to word directly") +// ui.word = urv.ptr +// } +// // ui.word = urv.ptr +// ui.typ = urv.typ +// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) +// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) +// return *(*interface{})(unsafe.Pointer(&ui)) +// // return i +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/json.go b/vendor/github.com/hashicorp/go-msgpack/codec/json.go new file mode 100644 index 00000000000..a731c8165e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/json.go @@ -0,0 +1,1491 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. 
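A small standard-library-only illustration of the point made in the header comment above: arbitrary bytes cannot be stored directly in a JSON string, so raw []byte values are wrapped as a quoted base64 payload, roughly what the EncodeStringBytesRaw path further below emits. The variable names here are invented for the example.

// Why raw bytes become a quoted base64 string in JSON output.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := []byte{0x00, 0xff, 0xfe, 'h', 'i'} // not valid UTF-8, not directly representable in a JSON string

	// Encode: quote a base64 rendering of the bytes.
	enc := make([]byte, base64.StdEncoding.EncodedLen(len(raw))+2)
	enc[0] = '"'
	base64.StdEncoding.Encode(enc[1:], raw)
	enc[len(enc)-1] = '"'
	fmt.Println(string(enc)) // "AP/+aGk="

	// Decode: strip the quotes and reverse the base64.
	dec, err := base64.StdEncoding.DecodeString(string(enc[1 : len(enc)-1]))
	if err != nil {
		panic(err)
	}
	fmt.Println(dec) // [0 255 254 104 105]
}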
+ +import ( + "bytes" + "encoding/base64" + "math" + "reflect" + "strconv" + "time" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{ + '"', 't', 'r', 'u', 'e', '"', + '"', 'f', 'a', 'l', 's', 'e', '"', + '"', 'n', 'u', 'l', 'l', '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + // jsonLitNullQ = 13 + jsonLitNull = 14 +) + +var ( + jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4] + jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5] + jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4] +) + +const ( + jsonU4Chk2 = '0' + jsonU4Chk1 = 'a' - 10 + jsonU4Chk0 = 'A' - 10 + + jsonScratchArrayLen = 64 +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonAlwaysReturnInternString = false +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte + + jsonCharHtmlSafeSet bitset256 + jsonCharSafeSet bitset256 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 +) + +func init() { + var i byte + for i = 0; i < jsonSpacesOrTabsLen; i++ { + jsonSpaces[i] = ' ' + jsonTabs[i] = '\t' + } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } +} + +// ---------------- + +type jsonEncDriverTypical struct { + jsonEncDriver +} + +func (e *jsonEncDriverTypical) typical() {} + +func (e *jsonEncDriverTypical) WriteArrayStart(length int) { + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverTypical) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverTypical) WriteArrayEnd() { + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverTypical) WriteMapStart(length int) { + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverTypical) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + e.c = containerMapKey +} + +func (e *jsonEncDriverTypical) WriteMapElemValue() { + e.w.writen1(':') + e.c = containerMapValue +} + +func (e *jsonEncDriverTypical) WriteMapEnd() { + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverTypical) EncodeBool(b bool) { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } +} + +func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { + fmt, prec := jsonFloatStrconvFmtPrec(f) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) +} + +func (e *jsonEncDriverTypical) EncodeInt(v int64) { + 
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeUint(v uint64) { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverTypical) atEndOfEncode() { +// if e.tw { +// e.w.writen1(' ') +// } +// } + +// ---------------- + +type jsonEncDriverGeneric struct { + jsonEncDriver + // ds string // indent string + di int8 // indent per + d bool // indenting? + dt bool // indent using tabs + dl uint16 // indent level + ks bool // map key as string + is byte // integer as string + _ byte // padding + _ [2]uint64 // padding +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriverGeneric) reset() { + e.jsonEncDriver.reset() + e.d, e.dt, e.dl, e.di = false, false, 0, 0 + if e.h.Indent > 0 { + e.d = true + e.di = int8(e.h.Indent) + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.di = int8(-e.h.Indent) + } + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString +} + +func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverGeneric) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverGeneric) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverGeneric) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverGeneric) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriverGeneric) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriverGeneric) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverGeneric) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.dt { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonTabs[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonSpaces[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverGeneric) EncodeBool(b bool) { + if e.ks && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { + // instead of using 'g', specify whether to use 'e' or 'f' + fmt, prec := jsonFloatStrconvFmtPrec(f) + + var blen int + if e.ks && e.c == containerMapKey { + blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) + e.b[0] = '"' + e.b[blen-1] = '"' + } else { + blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriverGeneric) 
EncodeInt(v int64) { + x := e.is + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { + x := e.is + if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { + // e.encodeFloat(float64(f), 32) + // always encode all floats as IEEE 64-bit floating point. + // It also ensures that we can decode in full precision even if into a float32, + // as what is written is always to float64 precision. + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverGeneric) atEndOfEncode() { +// if e.tw { +// if e.d { +// e.w.writen1('\n') +// } else { +// e.w.writen1(' ') +// } +// } +// } + +// -------------------- + +type jsonEncDriver struct { + noBuiltInTypes + e *Encoder + h *JsonHandle + w *encWriterSwitch + se extWrapper + // ---- cpu cache line boundary? + bs []byte // scratch + // ---- cpu cache line boundary? + // scratch: encode time, etc. + // include scratch buffer and padding, but leave space for containerstate + b [jsonScratchArrayLen + 8 + 8 - 1]byte + c containerState + // _ [2]uint64 // padding +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeTime(t time.Time) { + // Do NOT use MarshalJSON, as it allocates internally. 
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b) + if t.IsZero() { + e.EncodeNil() + } else { + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + } + // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.w.writeb(v) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if c == cRAW { + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + htmlasis := e.h.HTMLCharsAsIs + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { + if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
+ if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + // if e.c == 0 { // scalar written, output space + // e.w.writen1(' ') + // } else if e.h.TermWhitespace { // container written, output new-line + // e.w.writen1('\n') + // } + if e.h.TermWhitespace { + if e.c == 0 { // scalar written, output space + e.w.writen1(' ') + } else { // container written, output new-line + e.w.writen1('\n') + } + } + + // e.c = 0 +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r *decReaderSwitch + se extWrapper + + // ---- writable fields during execution --- *try* to keep in sep cache line + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + fnull bool // found null from appendStringAsBytes + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + bstr [8]byte // scratch used for string \UXXX parsing + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time + b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes + + // _ [3]uint64 // padding + // n jsonNum +} + +// func jsonIsWS(b byte) bool { +// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' +// return jsonCharWhitespaceSet.isset(b) +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '{' + if d.tok != xc { + d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '[' + if d.tok != xc { + d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +// For the ReadXXX methods below, we could just delegate to helper functions +// readContainerState(c containerState, xc uint8, check bool) +// - ReadArrayElem would become: +// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) +// +// However, until mid-stack inlining comes in go1.11 which supports inlining of +// one-liners, we explicitly write them all 5 out to elide the extra func call. +// +// TODO: For Go 1.11, if inlined, consider consolidating these. 
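An illustrative sketch (editorial, not part of the vendored patch): the consolidated helper hinted at in the comment above could plausibly look like the following, reusing the existing skip and errorf helpers; the name readContainerState comes from the comment itself and is otherwise hypothetical.

// readContainerState would centralize the shared token handling of the five ReadXXX methods:
// skip whitespace to the next token, optionally require and consume the separator xc,
// then record the new container state.
func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
	if d.tok == 0 {
		d.tok = d.r.skip(&jsonCharWhitespaceSet)
	}
	if check {
		if d.tok != xc {
			d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
		}
		d.tok = 0
	}
	d.c = c
}

With mid-stack inlining available, ReadArrayElem could then reduce to the one-liner the comment suggests:
	d.readContainerState(containerArrayElem, ',', d.c != containerArrayStart)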
+ +func (d *jsonDecDriver) ReadArrayElem() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + if d.tok != xc { + d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + const xc uint8 = ']' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + if d.tok != xc { + d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + const xc uint8 = ':' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + const xc uint8 = '}' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readLit(length, fromIdx uint8) { +// // length here is always less than 8 (literals are: null, true, false) +// bs := d.r.readx(int(length)) +// d.tok = 0 +// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { +// d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) +// } +// } + +func (d *jsonDecDriver) readLit4True() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4True) { + d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs) + } +} + +func (d *jsonDecDriver) readLit4False() { + bs := d.r.readx(4) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4False) { + d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs) + } +} + +func (d *jsonDecDriver) readLit4Null() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4Null) { + d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs) + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // we shouldn't try to see if "null" was here, right? 
+ // only the plain string: `null` denotes a nil (ie not quotes) + if d.tok == 'n' { + d.readLit4Null() + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit4False() + // v = false + case 't': + d.readLit4True() + v = true + default: + d.d.errorf("decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) DecodeTime() (t time.Time) { + // read string, and pass the string into json.unmarshal + d.appendStringAsBytes() + if d.fnull { + return + } + t, err := time.Parse(time.RFC3339, stringView(d.bs)) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + // optimize this, so we don't do 4 checks but do one computation. + // return jsonContainerSet[d.tok] + + // ContainerType is mostly called for Map and Array, + // so this conditional is good enough (max 2 checks typically) + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint64() (u uint64) { + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing unsigned integer: %s", bs) + } else if neg { + d.d.errorf("minus found parsing unsigned integer: %s", bs) + } else if badsyntax { + // fallback: try to decode as float, and cast + n = d.decUint64ViaFloat(stringView(bs)) + } + return n +} + +func (d *jsonDecDriver) DecodeInt64() (i int64) { + const cutoff = uint64(1 << uint(64-1)) + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing integer: %s", bs) + } else if badsyntax { + // d.d.errorf("invalid syntax for integer: %s", bs) + // fallback: try to decode as float, and cast + if neg { + n = d.decUint64ViaFloat(stringView(bs[1:])) + } else { + n = d.decUint64ViaFloat(stringView(bs)) + } + } + if neg { + if n > cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = -(int64(n)) + } else { + if n >= cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = int64(n) + } + return +} + +func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { + if len(s) == 0 { + return + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + d.d.errorf("invalid syntax for integer: %s", s) + // d.d.errorv(err) + } + fi, ff := math.Modf(f) + if ff > 0 { + d.d.errorf("fractional part found parsing integer: %s", s) + } else if fi > float64(math.MaxUint64) { + d.d.errorf("overflow parsing integer: %s", s) + } + return uint64(fi) +} + +func (d *jsonDecDriver) DecodeFloat64() (f float64) { + bs := d.decNumBytes() + if len(bs) == 0 
{ + return + } + f, err := strconv.ParseFloat(stringView(bs), 64) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.InterfaceExt != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.tok == '[' { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit4Null() + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit4False() + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit4True() + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + if len(bs) <= cap(d.bs) { + d.bs = d.bs[:len(bs)] + } else { + d.bs = make([]byte, len(bs)) + } + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = uint(len(cs)) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + var i, cursor uint + for { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = uint(len(cs)) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
+ i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + if cslen < i+4 { + d.d.errorf("need at least 4 more bytes for unicode sequence") + } + var j uint + for _, c = range cs[i+1 : i+5] { // bounds-check-elimination + // best to use explicit if-else + // - not a table, etc which involve memory loads, array lookup with bounds checks, etc + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if len(cs) >= int(i+6) { + var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination + if cx[0] == '\\' && cx[1] == 'u' { + i += 2 + var rr1 uint32 + for j = 2; j < 6; j++ { + c = cx[j] + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + goto encode_rune + } + } + r = unicode.ReplacementChar + } + encode_rune: + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("unsupported escaped value: %c", c) + } + i++ + cursor = i + } + d.bs = v +} + +func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { + const cutoff = uint64(1 << uint(64-1)) + + var n uint64 + var neg, badsyntax, overflow bool + + if len(bs) == 0 { + if d.h.PreferFloat { + z.v = valueTypeFloat + z.f = 0 + } else if d.h.SignedInteger { + z.v = valueTypeInt + z.i = 0 + } else { + z.v = valueTypeUint + z.u = 0 + } + return + } + if d.h.PreferFloat { + goto F + } + n, neg, badsyntax, overflow = jsonParseInteger(bs) + if badsyntax || overflow { + goto F + } + if neg { + if n > cutoff { + goto F + } + z.v = valueTypeInt + z.i = -(int64(n)) + } else if d.h.SignedInteger { + if n >= cutoff { + goto F + } + z.v = valueTypeInt + z.i = int64(n) + } else { + z.v = valueTypeUint + z.u = n + } + return +F: + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + return +} + +func (d *jsonDecDriver) bsToString() string { + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := d.d.naked() + // var decodeFurther bool + + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + switch d.tok { + case 'n': + d.readLit4Null() + z.v = valueTypeNil + case 'f': + d.readLit4False() + z.v = valueTypeBool + z.b = false + case 't': + d.readLit4True() + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart + case '[': + z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart + case '"': + // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first + d.appendStringAsBytes() + if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { + switch stringView(d.bs) { + case "null": + z.v = valueTypeNil + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false + default: + // check if a number: float, int or uint + if err := d.nakedNum(z, d.bs); err != nil { + z.v = valueTypeString + z.s = d.bsToString() + } + } + } else { + z.v = valueTypeString + z.s = d.bsToString() + } + default: // number + bs := d.decNumBytes() + if len(bs) == 0 { + d.d.errorf("decode number from empty string") + return + } + if err := d.nakedNum(z, bs); err != nil { + d.d.errorf("decode number from %s: %v", bs, err) + return + } + } + // if decodeFurther { + // d.s.sc.retryRead() + // } +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc. +// - decode integers from float formatted numbers e.g. 1.27e+8 +// - decode any json value (numbers, bool, etc) from quoted strings +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +// +// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are +// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD. +type JsonHandle struct { + textEncodingType + BasicHandle + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString byte + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \uXXX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE]. 
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ [2]byte // padding + + // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + _ [2]uint64 // padding +} + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } +func (h *JsonHandle) hasElemSeparators() bool { return true } +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +type jsonTypical interface { + typical() +} + +func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { + _, v = ed.(jsonTypical) + return v != h.typical() +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { + var hd *jsonEncDriver + if h.typical() { + var v jsonEncDriverTypical + ee = &v + hd = &v.jsonEncDriver + } else { + var v jsonEncDriverGeneric + ee = &v + hd = &v.jsonEncDriver + } + hd.e, hd.h, hd.bs = e, h, hd.b[:0] + hd.se.BytesExt = bytesExtFailer{} + ee.reset() + return +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.se.BytesExt = bytesExtFailer{} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.InterfaceExt = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.c = 0 +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.InterfaceExt = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { + prec = -1 + var abs = math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } else { + fmt = 'f' + // set prec to 1 iff mod is 0. + // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. + // this ensures that every float has an e or .0 in it. + if abs <= 1 { + if abs == 0 || abs == 1 { + prec = 1 + } + } else if _, mod := math.Modf(abs); mod == 0 { + prec = 1 + } + } + return +} + +// custom-fitted version of strconv.Parse(Ui|I)nt. +// Also ensures we don't have to search for .eE to determine if a float or not. +// Note: s CANNOT be a zero-length slice. 
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { + const maxUint64 = (1<<64 - 1) + const cutoff = maxUint64/10 + 1 + + if len(s) == 0 { // bounds-check-elimination + // treat empty string as zero value + // badSyntax = true + return + } + switch s[0] { + case '+': + s = s[1:] + case '-': + s = s[1:] + neg = true + } + for _, c := range s { + if c < '0' || c > '9' { + badSyntax = true + return + } + // unsigned integers don't overflow well on multiplication, so check cutoff here + // e.g. (maxUint64-5)*10 doesn't overflow well ... + if n >= cutoff { + overflow = true + return + } + n *= 10 + n1 := n + uint64(c-'0') + if n1 < n || n1 > maxUint64 { + overflow = true + return + } + n = n1 + } + return +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriverGeneric)(nil) +var _ encDriver = (*jsonEncDriverTypical)(nil) +var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl new file mode 100644 index 00000000000..c598cc73a5e --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl @@ -0,0 +1,154 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. + +package codec + +import "testing" +import "fmt" +import "reflect" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. + +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + var v{{$i}}va [8]{{ .Elem }} + for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/* + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + // - encode value to some []byte + // - decode into a length-wise-equal []byte + // - check if equal to initial slice + // - encode ptr to the value + // - check if encode bytes are same + // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice + // - decode into non-addressable slice of equal length, then larger len + // - for each decode, compare elem-by-elem to the original slice + // - + 
// - rinse and repeat for a MapBySlice version + // - + */}} + var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") + // ... + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:1:1] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") + if len(v{{$i}}v1) > 1 { + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") + } + // ... 
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} + v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v2 = nil + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") + // ... + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} + v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl new file mode 100644 index 00000000000..71eaf618a50 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl @@ -0,0 +1,94 @@ +// +build !notfastpath + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. + +package codec + +// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... +// +// Add: +// - test file for creating a mammoth generated file as _mammoth_generated.go +// - generate a second mammoth files in a different file: mammoth2_generated_test.go +// - mammoth-test.go.tmpl will do this +// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) +// - as part of TestMammoth, run it also +// - this will cover all the codecgen, gen-helper, etc in one full run +// - check in mammoth* files into github also +// - then +// +// Now, add some types: +// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it +// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types +// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) + + +// import "encoding/binary" +import "fmt" + +type TestMammoth2 struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +// ----------- + +type testMammoth2Binary uint64 +func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { +data = make([]byte, 8) +bigen.PutUint64(data, uint64(x)) +return +} +func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { +*x = testMammoth2Binary(bigen.Uint64(data)) +return +} + +type testMammoth2Text uint64 +func (x testMammoth2Text) MarshalText() (data []byte, err error) { +data = []byte(fmt.Sprintf("%b", uint64(x))) +return +} +func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) +return +} + +type testMammoth2Json uint64 +func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { +data = []byte(fmt.Sprintf("%v", uint64(x))) +return +} +func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) +return +} + +type testMammoth2Basic [4]uint64 + +type TestMammoth2Wrapper struct { + V TestMammoth2 + T testMammoth2Text + B testMammoth2Binary + J testMammoth2Json + C testMammoth2Basic + M map[testMammoth2Basic]TestMammoth2 + L []TestMammoth2 + A [4]int64 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go index da0500d1922..bf311a6778a 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -1,5 +1,5 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
/* MSGPACK @@ -15,8 +15,8 @@ For compatibility with behaviour of msgpack-c reference implementation: - Go intX (<0) IS ENCODED AS msgpack -ve fixnum, signed - */ + package codec import ( @@ -24,59 +24,144 @@ import ( "io" "math" "net/rpc" + "reflect" + "time" ) const ( mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 + mpPosFixNumMax byte = 0x7f + mpFixMapMin byte = 0x80 + mpFixMapMax byte = 0x8f + mpFixArrayMin byte = 0x90 + mpFixArrayMax byte = 0x9f + mpFixStrMin byte = 0xa0 + mpFixStrMax byte = 0xbf + mpNil byte = 0xc0 + _ byte = 0xc1 + mpFalse byte = 0xc2 + mpTrue byte = 0xc3 + mpFloat byte = 0xca + mpDouble byte = 0xcb + mpUint8 byte = 0xcc + mpUint16 byte = 0xcd + mpUint32 byte = 0xce + mpUint64 byte = 0xcf + mpInt8 byte = 0xd0 + mpInt16 byte = 0xd1 + mpInt32 byte = 0xd2 + mpInt64 byte = 0xd3 // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff + mpBin8 byte = 0xc4 + mpBin16 byte = 0xc5 + mpBin32 byte = 0xc6 + mpExt8 byte = 0xc7 + mpExt16 byte = 0xc8 + mpExt32 byte = 0xc9 + mpFixExt1 byte = 0xd4 + mpFixExt2 byte = 0xd5 + mpFixExt4 byte = 0xd6 + mpFixExt8 byte = 0xd7 + mpFixExt16 byte = 0xd8 + + mpStr8 byte = 0xd9 // new + mpStr16 byte = 0xda + mpStr32 byte = 0xdb + + mpArray16 byte = 0xdc + mpArray32 byte = 0xdd + + mpMap16 byte = 0xde + mpMap32 byte = 0xdf + + mpNegFixNumMin byte = 0xe0 + mpNegFixNumMax byte = 0xff ) +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +// var mpdesc = map[byte]string{ +// mpPosFixNumMin: "PosFixNumMin", +// mpPosFixNumMax: "PosFixNumMax", +// mpFixMapMin: "FixMapMin", +// mpFixMapMax: "FixMapMax", +// mpFixArrayMin: "FixArrayMin", +// mpFixArrayMax: "FixArrayMax", +// mpFixStrMin: "FixStrMin", +// mpFixStrMax: "FixStrMax", +// mpNil: "Nil", +// mpFalse: "False", +// mpTrue: "True", +// mpFloat: "Float", +// mpDouble: "Double", +// mpUint8: "Uint8", +// mpUint16: "Uint16", +// mpUint32: "Uint32", +// mpUint64: "Uint64", +// mpInt8: "Int8", +// mpInt16: "Int16", +// mpInt32: "Int32", +// mpInt64: "Int64", +// mpBin8: "Bin8", +// mpBin16: "Bin16", +// mpBin32: "Bin32", +// mpExt8: "Ext8", +// mpExt16: "Ext16", +// mpExt32: "Ext32", +// mpFixExt1: "FixExt1", +// mpFixExt2: "FixExt2", +// mpFixExt4: "FixExt4", +// mpFixExt8: "FixExt8", +// mpFixExt16: "FixExt16", +// mpStr8: "Str8", +// mpStr16: "Str16", +// mpStr32: "Str32", +// mpArray16: "Array16", +// mpArray32: "Array32", +// mpMap16: "Map16", +// mpMap32: "Map32", +// mpNegFixNumMin: "NegFixNumMin", +// mpNegFixNumMax: "NegFixNumMax", +// } + +func mpdesc(bd byte) string { + switch bd { + case mpNil: + return "nil" + case mpFalse: + return "false" + case mpTrue: + return "true" + case mpFloat, mpDouble: + return "float" + case mpUint8, mpUint16, mpUint32, mpUint64: + return "uint" + case mpInt8, mpInt16, mpInt32, mpInt64: + return "int" + default: + switch { + case bd >= 
mpPosFixNumMin && bd <= mpPosFixNumMax: + return "int" + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + return "int" + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + return "string|bytes" + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + return "bytes" + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + return "array" + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + return "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + return "ext" + default: + return "unknown" + } + } +} + // MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec // that the backend RPC service takes multiple arguments, which have been arranged // in sequence in the slice. @@ -87,76 +172,102 @@ type MsgpackSpecRpcMultiArgs []interface{} // A MsgpackContainer type specifies the different types of msgpackContainers. type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool + fixCutoff uint8 + bFixMin, b8, b16, b32 byte + // hasFixMin, has8, has8Always bool } var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} + msgpackContainerRawLegacy = msgpackContainerType{ + 32, mpFixStrMin, 0, mpStr16, mpStr32, + } + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, + } ) //--------------------------------------------- type msgpackEncDriver struct { - w encWriter + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w *encWriterSwitch h *MsgpackHandle + x [8]byte + // _ [3]uint64 // padding } -func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} - -func (e *msgpackEncDriver) encodeNil() { +func (e *msgpackEncDriver) EncodeNil() { e.w.writen1(mpNil) } -func (e *msgpackEncDriver) encodeInt(i int64) { - - switch { - case i >= 0: - e.encodeUint(uint64(i)) - case i >= -32: - e.w.writen1(byte(i)) - case i >= math.MinInt8: +func (e *msgpackEncDriver) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { e.w.writen2(mpInt8, byte(i)) - case i >= math.MinInt16: + } else if i >= math.MinInt16 { e.w.writen1(mpInt16) - e.w.writeUint16(uint16(i)) - case i >= math.MinInt32: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { e.w.writen1(mpInt32) - e.w.writeUint32(uint32(i)) - default: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { e.w.writen1(mpInt64) - e.w.writeUint64(uint64(i)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) encodeUint(i uint64) { - switch { - case i <= math.MaxInt8: - e.w.writen1(byte(i)) - case i <= math.MaxUint8: +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { e.w.writen2(mpUint8, byte(i)) - case i <= math.MaxUint16: + } else if i <= math.MaxUint16 { e.w.writen1(mpUint16) - e.w.writeUint16(uint16(i)) - case i <= math.MaxUint32: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { e.w.writen1(mpUint32) - e.w.writeUint32(uint32(i)) - default: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { e.w.writen1(mpUint64) - e.w.writeUint64(uint64(i)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) encodeBool(b bool) { +func (e *msgpackEncDriver) EncodeBool(b bool) { if b { e.w.writen1(mpTrue) } else { @@ -164,229 +275,318 @@ func (e *msgpackEncDriver) encodeBool(b bool) { } } -func (e *msgpackEncDriver) encodeFloat32(f float32) { +func (e *msgpackEncDriver) EncodeFloat32(f float32) { e.w.writen1(mpFloat) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *msgpackEncDriver) encodeFloat64(f float64) { +func (e *msgpackEncDriver) EncodeFloat64(f float64) { e.w.writen1(mpDouble) - e.w.writeUint64(math.Float64bits(f)) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + case 4: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) + case 8: + bigenHelper{e.x[:8], e.w}.writeUint64(data64) + case 12: 
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec)) + } +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytesRaw(bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) } func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - switch { - case l == 1: + if l == 1 { e.w.writen2(mpFixExt1, xtag) - case l == 2: + } else if l == 2 { e.w.writen2(mpFixExt2, xtag) - case l == 4: + } else if l == 4 { e.w.writen2(mpFixExt4, xtag) - case l == 8: + } else if l == 8 { e.w.writen2(mpFixExt8, xtag) - case l == 16: + } else if l == 16 { e.w.writen2(mpFixExt16, xtag) - case l < 256: + } else if l < 256 { e.w.writen2(mpExt8, byte(l)) e.w.writen1(xtag) - case l < 65536: + } else if l < 65536 { e.w.writen1(mpExt16) - e.w.writeUint16(uint16(l)) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) e.w.writen1(xtag) - default: + } else { e.w.writen1(mpExt32) - e.w.writeUint32(uint32(l)) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) e.w.writen1(xtag) } } -func (e *msgpackEncDriver) encodeArrayPreamble(length int) { +func (e *msgpackEncDriver) WriteArrayStart(length int) { e.writeContainerLen(msgpackContainerList, length) } -func (e *msgpackEncDriver) encodeMapPreamble(length int) { +func (e *msgpackEncDriver) WriteMapStart(length int) { e.writeContainerLen(msgpackContainerMap, length) } -func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(s)) + e.writeContainerLen(msgpackContainerRawLegacy, slen) } - if len(s) > 0 { + if slen > 0 { e.w.writestr(s) } } -func (e *msgpackEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) +func (e *msgpackEncDriver) EncodeStringEnc(c charEncoding, s string) { + slen := len(s) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerStr, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } } -func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) + e.writeContainerLen(msgpackContainerRawLegacy, slen) } - if len(bs) > 0 { + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { e.w.writeb(bs) } } func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - switch { - case ct.hasFixMin && l < ct.fixCutoff: + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) 
{ e.w.writen1(ct.bFixMin | byte(l)) - case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + } else if ct.b8 > 0 && l < 256 { e.w.writen2(ct.b8, uint8(l)) - case l < 65536: + } else if l < 65536 { e.w.writen1(ct.b16) - e.w.writeUint16(uint16(l)) - default: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { e.w.writen1(ct.b32) - e.w.writeUint32(uint32(l)) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) } } //--------------------------------------------- type msgpackDecDriver struct { - r decReader - h *MsgpackHandle + d *Decoder + r *decReaderSwitch + h *MsgpackHandle + // b [scratchByteArrayLen]byte bd byte bdRead bool - bdType valueType + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding } -func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. - return false -} - -func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} - // Note: This returns either a primitive (int, bool, etc) for non-containers, // or a containerType, or a specific type denoting nil or extension. // It is called when a nil interface{} is passed, leaving it up to the DecDriver // to introspect the stream and decide how best to decode. // It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } bd := d.bd + n := d.d.naked() + var decodeFurther bool switch bd { case mpNil: - vt = valueTypeNil + n.v = valueTypeNil d.bdRead = false case mpFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case mpTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case mpFloat: - vt = valueTypeFloat - v = float64(math.Float32frombits(d.r.readUint32())) + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) case mpDouble: - vt = valueTypeFloat - v = math.Float64frombits(d.r.readUint64()) + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) case mpUint8: - vt = valueTypeUint - v = uint64(d.r.readn1()) + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) case mpUint16: - vt = valueTypeUint - v = uint64(d.r.readUint16()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) case mpUint32: - vt = valueTypeUint - v = uint64(d.r.readUint32()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) case mpUint64: - vt = valueTypeUint - v = uint64(d.r.readUint64()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) case mpInt8: - vt = valueTypeInt - v = int64(int8(d.r.readn1())) + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) case mpInt16: - vt = valueTypeInt - v = int64(int16(d.r.readUint16())) + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) case mpInt32: - vt = valueTypeInt - v = int64(int32(d.r.readUint32())) + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) case mpInt64: - vt = valueTypeInt - v = int64(int64(d.r.readUint64())) + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) default: switch { case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: // positive fixnum (always signed) - vt = valueTypeInt - v = int64(int8(bd)) + n.v = valueTypeInt + n.i = int64(int8(bd)) case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // negative fixnum - vt 
= valueTypeInt - v = int64(int8(bd)) + n.v = valueTypeInt + n.i = int64(int8(bd)) case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - var rvm string - vt = valueTypeString - v = &rvm + if d.h.WriteExt || d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() } else { - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) } - decodeFurther = true case bd == mpBin8, bd == mpBin16, bd == mpBin32: - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - decodeFurther = true + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - vt = valueTypeArray + n.v = valueTypeArray decodeFurther = true case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt clen := d.readExtLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(clen) - v = &re - vt = valueTypeExt + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else if d.br { + n.l = d.r.readx(uint(clen)) + } else { + n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } default: - decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) } } if !decodeFurther { d.bdRead = false } - return + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } } // int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { +func (d *msgpackDecDriver) DecodeInt64() (i int64) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case mpUint8: i = int64(uint64(d.r.readn1())) case mpUint16: - i = int64(uint64(d.r.readUint16())) + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) case mpUint32: - i = int64(uint64(d.r.readUint32())) + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) case mpUint64: - i = int64(d.r.readUint64()) + i = int64(bigen.Uint64(d.r.readx(8))) case mpInt8: i = int64(int8(d.r.readn1())) case mpInt16: - i = int64(int16(d.r.readUint16())) + i = int64(int16(bigen.Uint16(d.r.readx(2)))) case mpInt32: - i = int64(int32(d.r.readUint32())) + i = int64(int32(bigen.Uint32(d.r.readx(4)))) case mpInt64: - i = int64(d.r.readUint64()) + i = int64(bigen.Uint64(d.r.readx(8))) default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: @@ -394,13 +594,8 @@ func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: i = int64(int8(d.bd)) default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) + d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } } d.bdRead = false @@ -408,54 +603,57 @@ func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { } // uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { +func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { + if !d.bdRead { + 
d.readNextBd() + } switch d.bd { case mpUint8: ui = uint64(d.r.readn1()) case mpUint16: - ui = uint64(d.r.readUint16()) + ui = uint64(bigen.Uint16(d.r.readx(2))) case mpUint32: - ui = uint64(d.r.readUint32()) + ui = uint64(bigen.Uint32(d.r.readx(4))) case mpUint64: - ui = d.r.readUint64() + ui = bigen.Uint64(d.r.readx(8)) case mpInt8: if i := int64(int8(d.r.readn1())); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt16: - if i := int64(int16(d.r.readUint16())); i >= 0 { + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt32: - if i := int64(int32(d.r.readUint32())); i >= 0 { + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt64: - if i := int64(d.r.readUint64()); i >= 0 { + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: ui = uint64(d.bd) case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + return default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) + d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } } d.bdRead = false @@ -463,160 +661,173 @@ func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { } // float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case mpFloat: - f = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - f = math.Float64frombits(d.r.readUint64()) - default: - f = float64(d.decodeInt(0)) +func (d *msgpackDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt64()) } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool, fixnum 0 or 1. 
-func (d *msgpackDecDriver) decodeBool() (b bool) { - switch d.bd { - case mpFalse, 0: +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { // b = false - case mpTrue, 1: + } else if d.bd == mpTrue || d.bd == 1 { b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else { + d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } d.bdRead = false return } -func (d *msgpackDecDriver) decodeString() (s string) { - clen := d.readContainerLen(msgpackContainerStr) - if clen > 0 { - s = string(d.r.readn(clen)) +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() } - d.bdRead = false - return -} -// Callers must check if changed=true (to decide whether to replace the one they have) -func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + bd := d.bd var clen int - switch d.bd { - case mpBin8, mpBin16, mpBin32: - clen = d.readContainerLen(msgpackContainerBin) - default: - clen = d.readContainerLen(msgpackContainerStr) - } - // if clen < 0 { - // changed = true - // panic("length cannot be zero. this cannot be nil.") - // } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - // Return changed=true if length of passed slice diff from length of bytes in stream - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // check if an "array" of uint8's + if zerocopy && len(bs) == 0 { + bs = d.d.b[:] } - d.r.readb(bs) + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } else { + d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + return } + d.bdRead = false - return + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) } -// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. 
-func (d *msgpackDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { d.bd = d.r.readn1() d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *msgpackDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - bd := d.bd - switch bd { - case mpNil: - d.bdType = valueTypeNil - case mpFalse, mpTrue: - d.bdType = valueTypeBool - case mpFloat, mpDouble: - d.bdType = valueTypeFloat - case mpUint8, mpUint16, mpUint32, mpUint64: - d.bdType = valueTypeUint - case mpInt8, mpInt16, mpInt32, mpInt64: - d.bdType = valueTypeInt - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - d.bdType = valueTypeInt - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - d.bdType = valueTypeInt - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - d.bdType = valueTypeString - } else { - d.bdType = valueTypeBytes - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.bdType = valueTypeBytes - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - d.bdType = valueTypeArray - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - d.bdType = valueTypeMap - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - d.bdType = valueTypeExt - default: - decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + // if bd == mpNil { + // // nil + // } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + // // binary + // } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + // // string/raw + // } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // // array + // } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + // // map + // } + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec) + return valueTypeString } + return valueTypeBytes // raw (old spec) + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap } - return d.bdType + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset } -func (d *msgpackDecDriver) tryDecodeAsNil() bool { +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } if d.bd == mpNil { d.bdRead = false return true } - return false + return } func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { bd := d.bd - switch { - case bd == mpNil: + if bd == mpNil { clen = -1 // to represent nil - case bd == ct.b8: + } else if bd == ct.b8 { clen = 
int(d.r.readn1()) - case bd == ct.b16: - clen = int(d.r.readUint16()) - case bd == ct.b32: - clen = int(d.r.readUint32()) - case (ct.bFixMin & bd) == ct.bFixMin: + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { clen = int(ct.bFixMin ^ bd) - default: - decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } else { + d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return } d.bdRead = false return } -func (d *msgpackDecDriver) readMapLen() int { +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerMap) } -func (d *msgpackDecDriver) readArrayLen() int { +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerList) } @@ -637,30 +848,107 @@ func (d *msgpackDecDriver) readExtLen() (clen int) { case mpExt8: clen = int(d.r.readn1()) case mpExt16: - clen = int(d.r.readUint16()) + clen = int(bigen.Uint16(d.r.readx(2))) case mpExt32: - clen = int(d.r.readUint32()) + clen = int(bigen.Uint32(d.r.readx(4))) default: - decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return } return } -func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - xbd := d.bd - switch { - case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: - xbs, _ = d.decodeBytes(nil) - case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, - xbd >= mpFixStrMin && xbd <= mpFixStrMax: - xbs = []byte(d.decodeString()) +func (d *msgpackDecDriver) DecodeTime() (t time.Time) { + // decode time from string bytes or ext + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else { + // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + return + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { + // bs = d.r.readx(clen) + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC() + case 8: + tv := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(d.r.readx(4)) + sec := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(sec), int64(nsec)).UTC() default: + d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = 
uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { clen := d.readExtLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + return + } + if d.br { + xbs = d.r.readx(uint(clen)) + } else { + xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) } - xbs = d.r.readn(clen) } d.bdRead = false return @@ -672,35 +960,55 @@ func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs [ type MsgpackHandle struct { BasicHandle - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt controls whether the new spec is honored. + // + // With WriteExt=true, we can encode configured extensions with extension tags + // and encode string/[]byte/extensions in a way compatible with the new spec + // but incompatible with the old spec. // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. + // For compatibility with the old spec, set WriteExt=false. // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. + // With WriteExt=false: + // configured extensions are serialized as raw bytes (not msgpack extensions). + // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type + // are not encoded. WriteExt bool + + // PositiveIntUnsigned says to encode positive integers as unsigned. 
+ PositiveIntUnsigned bool + + binaryEncodingType + noElemSeparators + + // _ [1]uint64 // padding } -func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { - return &msgpackEncDriver{w: w, h: h} +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) } -func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { - return &msgpackDecDriver{r: r, h: h} +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} } -func (h *MsgpackHandle) writeExt() bool { - return h.WriteExt +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (h *MsgpackHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false } //-------------------------------------------------- @@ -721,7 +1029,7 @@ func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) err bodyArr = []interface{}{body} } r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) + return c.write(r2, nil, false) } func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { @@ -733,7 +1041,7 @@ func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) e body = nil } r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) + return c.write(r2, nil, false) } func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -753,8 +1061,7 @@ func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { } func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.cls { + if cls := c.cls.load(); cls.closed { return io.EOF } @@ -767,28 +1074,34 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) // return // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } } - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. 
Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", + msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } } return } @@ -801,7 +1114,8 @@ type msgpackSpecRpc struct{} // MsgpackSpecRpc implements Rpc using the communication protocol defined in // the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +// +// See GoRpc documentation, for information on buffering for better performance. var MsgpackSpecRpc msgpackSpecRpc func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go index d014dbdcc7d..3925088152d 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -1,99 +1,150 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( "bufio" + "errors" "io" "net/rpc" - "sync" ) +var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true") + // Rpc provides a rpc Server or Client Codec for rpc communication. type Rpc interface { ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec } -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer +// RPCOptions holds options specific to rpc functionality +type RPCOptions struct { + // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. + // + // Set RPCNoBuffer=true to turn buffering off. + // Buffering can still be done if buffered connections are passed in, or + // buffering is configured on the handle. + RPCNoBuffer bool } -// ------------------------------------- - // rpcCodec defines the struct members and common methods. 
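For orientation, a minimal sketch of how the Rpc constructors above are typically consumed with net/rpc; the dial address and the "Echo123" method name are assumptions (mirroring test.py further down), not something defined by this library.

    package main

    import (
        "fmt"
        "net"
        "net/rpc"

        "github.com/hashicorp/go-msgpack/codec"
    )

    func main() {
        // Assumption: a msgpack-rpc server listens on this address and exposes "Echo123".
        conn, err := net.Dial("tcp", "127.0.0.1:5555")
        if err != nil {
            panic(err)
        }
        var mh codec.MsgpackHandle
        mh.WriteExt = true // honor the new msgpack spec (str8, bin types, extensions)
        // MsgpackSpecRpc speaks the msgpack-rpc wire protocol; GoRpc would instead use net/rpc's own framing.
        client := rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, &mh))
        var reply string
        if err := client.Call("Echo123", []interface{}{"A1", "B2", "C3"}, &reply); err != nil {
            panic(err)
        }
        fmt.Println(reply)
    }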
type rpcCodec struct { - rwc io.ReadWriteCloser + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + dec *Decoder enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - cls bool -} + // bw *bufio.Writer + // br *bufio.Reader + h Handle -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - } + cls atomicClsErr } -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) + return newRPCCodec2(conn, conn, conn, h) } -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw +func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errRpcJsonNeedsTermWhitespace) + } + // always ensure that we use a flusher, and always flush what was written to the connection. + // we lose nothing by using a buffered writer internally. + f, ok := w.(ioFlusher) + bh := basicHandle(h) + if !bh.RPCNoBuffer { + if bh.WriterBufferSize <= 0 { + if !ok { + bw := bufio.NewWriter(w) + f, w = bw, bw + } + } + if bh.ReaderBufferSize <= 0 { + if _, ok = w.(ioPeeker); !ok { + if _, ok = w.(ioBuffered); !ok { + br := bufio.NewReader(r) + r = br + } + } + } + } + return rpcCodec{ + c: c, + w: w, + r: r, + f: f, + h: h, + enc: NewEncoder(w, h), + dec: NewDecoder(r, h), + } } -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.cls { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return + err = c.enc.Encode(obj1) + if err == nil { + if writeObj2 { + err = c.enc.Encode(obj2) } + // if err == nil && c.f != nil { + // err = c.f.Flush() + // } } - if doFlush && c.bw != nil { - return c.bw.Flush() + if c.f != nil { + if err == nil { + err = c.f.Flush() + } else { + _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + } } return } +func (c *rpcCodec) swallow(err *error) { + defer panicToErr(c.dec, err) + c.dec.swallow() +} + func (c *rpcCodec) read(obj interface{}) (err error) { - if c.cls { - return io.EOF + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } } - //If nil is passed in, we should still attempt to read content to nowhere. 
+ //If nil is passed in, we should read and discard if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) + // var obj2 interface{} + // return c.dec.Decode(&obj2) + c.swallow(&err) + return } return c.dec.Decode(obj) } func (c *rpcCodec) Close() error { - if c.cls { - return io.EOF + if c.c == nil { + return nil + } + cls := c.cls.load() + if cls.closed { + return cls.errClosed } - c.cls = true - return c.rwc.Close() + cls.errClosed = c.c.Close() + cls.closed = true + c.cls.store(cls) + return cls.errClosed } func (c *rpcCodec) ReadResponseBody(body interface{}) error { @@ -107,16 +158,11 @@ type goRpcCodec struct { } func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) + return c.write(r, body, true) } func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) + return c.write(r, body, true) } func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -138,7 +184,36 @@ func (c *goRpcCodec) ReadRequestBody(body interface{}) error { type goRpc struct{} // GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +// +// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. +// +// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. +// This ensures we use an adequate buffer during reading and writing. +// If not configured, we will internally initialize and use a buffer during reads and writes. +// This can be turned off via the RPCNoBuffer option on the Handle. +// var handle codec.JsonHandle +// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer +// +// Example 1: one way of configuring buffering explicitly: +// var handle codec.JsonHandle // codec handle +// handle.ReaderBufferSize = 1024 +// handle.WriterBufferSize = 1024 +// var conn io.ReadWriteCloser // connection got from a socket +// var serverCodec = GoRpc.ServerCodec(conn, handle) +// var clientCodec = GoRpc.ClientCodec(conn, handle) +// +// Example 2: you can also explicitly create a buffered connection yourself, +// and not worry about configuring the buffer sizes in the Handle. 
+// var handle codec.Handle // codec handle +// var conn io.ReadWriteCloser // connection got from a socket +// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser +// io.Closer +// *bufio.Reader +// *bufio.Writer +// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} +// var serverCodec = GoRpc.ServerCodec(bufconn, handle) +// var clientCodec = GoRpc.ClientCodec(bufconn, handle) +// var GoRpc goRpc func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { @@ -148,5 +223,3 @@ func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { return &goRpcCodec{newRPCCodec(conn, h)} } - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go index 9e4d148a2a1..a3257c1a7bd 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -1,9 +1,13 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec -import "math" +import ( + "math" + "reflect" + "time" +) const ( _ uint8 = iota @@ -17,6 +21,8 @@ const ( simpleVdPosInt = 8 simpleVdNegInt = 12 + simpleVdTime = 24 + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) simpleVdString = 216 simpleVdByteArray = 224 @@ -26,23 +32,27 @@ const ( ) type simpleEncDriver struct { + noBuiltInTypes + // encNoSeparator + e *Encoder h *SimpleHandle - w encWriter - //b [8]byte -} - -func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { - return false + w *encWriterSwitch + b [8]byte + // c containerState + encDriverTrackContainerWriter + // encDriverNoopContainerWriter + _ [3]uint64 // padding } -func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { -} - -func (e *simpleEncDriver) encodeNil() { +func (e *simpleEncDriver) EncodeNil() { e.w.writen1(simpleVdNil) } -func (e *simpleEncDriver) encodeBool(b bool) { +func (e *simpleEncDriver) EncodeBool(b bool) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b { + e.EncodeNil() + return + } if b { e.w.writen1(simpleVdTrue) } else { @@ -50,17 +60,25 @@ func (e *simpleEncDriver) encodeBool(b bool) { } } -func (e *simpleEncDriver) encodeFloat32(f float32) { +func (e *simpleEncDriver) EncodeFloat32(f float32) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } e.w.writen1(simpleVdFloat32) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *simpleEncDriver) encodeFloat64(f float64) { +func (e *simpleEncDriver) EncodeFloat64(f float64) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } e.w.writen1(simpleVdFloat64) - e.w.writeUint64(math.Float64bits(f)) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) } -func (e *simpleEncDriver) encodeInt(v int64) { +func (e *simpleEncDriver) EncodeInt(v int64) { if v < 0 { e.encUint(uint64(-v), simpleVdNegInt) } else { @@ -68,236 +86,323 @@ func (e *simpleEncDriver) encodeInt(v int64) { } } -func (e *simpleEncDriver) encodeUint(v uint64) { +func (e *simpleEncDriver) 
EncodeUint(v uint64) { e.encUint(v, simpleVdPosInt) } func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - switch { - case v <= math.MaxUint8: + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 { + e.EncodeNil() + return + } + if v <= math.MaxUint8 { e.w.writen2(bd, uint8(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd + 1) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.w.writen1(bd + 2) - e.w.writeUint32(uint32(v)) - case v <= math.MaxUint64: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { e.w.writen1(bd + 3) - e.w.writeUint64(v) + bigenHelper{e.b[:8], e.w}.writeUint64(v) } } func (e *simpleEncDriver) encLen(bd byte, length int) { - switch { - case length == 0: + if length == 0 { e.w.writen1(bd) - case length <= math.MaxUint8: + } else if length <= math.MaxUint8 { e.w.writen1(bd + 1) e.w.writen1(uint8(length)) - case length <= math.MaxUint16: + } else if length <= math.MaxUint16 { e.w.writen1(bd + 2) - e.w.writeUint16(uint16(length)) - case int64(length) <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { e.w.writen1(bd + 3) - e.w.writeUint32(uint32(length)) - default: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { e.w.writen1(bd + 4) - e.w.writeUint64(uint64(length)) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) } } +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(simpleVdExt, length) e.w.writen1(xtag) } -func (e *simpleEncDriver) encodeArrayPreamble(length int) { +func (e *simpleEncDriver) WriteArrayStart(length int) { + e.c = containerArrayStart e.encLen(simpleVdArray, length) } -func (e *simpleEncDriver) encodeMapPreamble(length int) { +func (e *simpleEncDriver) WriteMapStart(length int) { + e.c = containerMapStart e.encLen(simpleVdMap, length) } -func (e *simpleEncDriver) encodeString(c charEncoding, v string) { +// func (e *simpleEncDriver) EncodeSymbol(v string) { +// e.EncodeStringEnc(cUTF8, v) +// } + +func (e *simpleEncDriver) EncodeStringEnc(c charEncoding, v string) { + if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" { + e.EncodeNil() + return + } e.encLen(simpleVdString, len(v)) e.w.writestr(v) } -func (e *simpleEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + e.EncodeStringEnc(c, v) } -func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { +func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + e.EncodeStringBytesRaw(v) +} + +func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil { + if v == nil { + e.EncodeNil() + return + } e.encLen(simpleVdByteArray, len(v)) e.w.writeb(v) } +func (e *simpleEncDriver) EncodeTime(t time.Time) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { + if t.IsZero() { + e.EncodeNil() + return + } + v, err := t.MarshalBinary() + if err 
!= nil { + e.e.errorv(err) + return + } + // time.Time marshalbinary takes about 14 bytes. + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) +} + //------------------------------------ type simpleDecDriver struct { + d *Decoder h *SimpleHandle - r decReader + r *decReaderSwitch bdRead bool - bdType valueType bd byte - //b [8]byte + br bool // a bytes reader? + c containerState + // b [scratchByteArrayLen]byte + noBuiltInTypes + // noStreamingCodec + decDriverNoopContainerReader + // _ [3]uint64 // padding } -func (d *simpleDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *simpleDecDriver) readNextBd() { d.bd = d.r.readn1() d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *simpleDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.bd { - case simpleVdNil: - d.bdType = valueTypeNil - case simpleVdTrue, simpleVdFalse: - d.bdType = valueTypeBool - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - d.bdType = valueTypeUint - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - d.bdType = valueTypeInt - case simpleVdFloat32, simpleVdFloat64: - d.bdType = valueTypeFloat - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - d.bdType = valueTypeString - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.bdType = valueTypeBytes - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - d.bdType = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - d.bdType = valueTypeArray - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) - } - } - return d.bdType } -func (d *simpleDecDriver) tryDecodeAsNil() bool { - if d.bd == simpleVdNil { +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() d.bdRead = false - return true } - return false } -func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { - return false +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdNil: + return valueTypeNil + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + return valueTypeBytes + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + return valueTypeString + case simpleVdArray, simpleVdArray + 1, + simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + return valueTypeArray + case simpleVdMap, simpleVdMap + 1, + simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + return valueTypeMap + // case simpleVdTime: + // return valueTypeTime + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset } -func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false } -func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case simpleVdPosInt: ui = uint64(d.r.readn1()) - i = int64(ui) 
case simpleVdPosInt + 1: - ui = uint64(d.r.readUint16()) - i = int64(ui) + ui = uint64(bigen.Uint16(d.r.readx(2))) case simpleVdPosInt + 2: - ui = uint64(d.r.readUint32()) - i = int64(ui) + ui = uint64(bigen.Uint32(d.r.readx(4))) case simpleVdPosInt + 3: - ui = uint64(d.r.readUint64()) - i = int64(ui) + ui = uint64(bigen.Uint64(d.r.readx(8))) case simpleVdNegInt: ui = uint64(d.r.readn1()) - i = -(int64(ui)) neg = true case simpleVdNegInt + 1: - ui = uint64(d.r.readUint16()) - i = -(int64(ui)) + ui = uint64(bigen.Uint16(d.r.readx(2))) neg = true case simpleVdNegInt + 2: - ui = uint64(d.r.readUint32()) - i = -(int64(ui)) + ui = uint64(bigen.Uint32(d.r.readx(4))) neg = true case simpleVdNegInt + 3: - ui = uint64(d.r.readUint64()) - i = -(int64(ui)) + ui = uint64(bigen.Uint64(d.r.readx(8))) neg = true default: - decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return } // don't do this check, because callers may only want the unsigned value. // if ui > math.MaxInt64 { - // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return // } return } -func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) +func (d *simpleDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } d.bdRead = false return } -func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() +func (d *simpleDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value to unsigned type") + return } - checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case simpleVdFloat32: - f = float64(math.Float32frombits(d.r.readUint32())) - case simpleVdFloat64: - f = math.Float64frombits(d.r.readUint64()) - default: +func (d *simpleDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - _, i, _ := d.decIntAny() - f = float64(i) + f = float64(d.DecodeInt64()) } else { - decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd) + return } } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). 
-func (d *simpleDecDriver) decodeBool() (b bool) { - switch d.bd { - case simpleVdTrue: +func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { b = true - case simpleVdFalse: - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + return } d.bdRead = false return } -func (d *simpleDecDriver) readMapLen() (length int) { +func (d *simpleDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false + d.c = containerMapStart return d.decLen() } -func (d *simpleDecDriver) readArrayLen() (length int) { +func (d *simpleDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false + d.c = containerArrayStart return d.decLen() } +func (d *simpleDecDriver) ReadArrayElem() { + d.c = containerArrayElem +} + +func (d *simpleDecDriver) ReadArrayEnd() { + d.c = containerArrayEnd +} + +func (d *simpleDecDriver) ReadMapElemKey() { + d.c = containerMapKey +} + +func (d *simpleDecDriver) ReadMapElemValue() { + d.c = containerMapValue +} + +func (d *simpleDecDriver) ReadMapEnd() { + d.c = containerMapEnd +} + func (d *simpleDecDriver) decLen() int { switch d.bd % 8 { case 0: @@ -305,116 +410,196 @@ func (d *simpleDecDriver) decLen() int { case 1: return int(d.r.readn1()) case 2: - return int(d.r.readUint16()) + return int(bigen.Uint16(d.r.readx(2))) case 3: - ui := uint64(d.r.readUint32()) - checkOverflow(ui, 0, intBitsize) + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } return int(ui) case 4: - ui := d.r.readUint64() - checkOverflow(ui, 0, intBitsize) + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } return int(ui) } - decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + d.d.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) return -1 } -func (d *simpleDecDriver) decodeString() (s string) { - s = string(d.r.readn(d.decLen())) - d.bdRead = false - return +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) } -func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - if clen := d.decLen(); clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true +func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 { + if len(bs) == 0 && zerocopy { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] } - d.r.readb(bs) + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + if d.bd != simpleVdTime { + d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + return } d.bdRead = false + clen := int(d.r.readn1()) + b := d.r.readx(uint(clen)) + if err := (&t).UnmarshalBinary(b); err != nil { + d.d.errorv(err) + } + return +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } return } -func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return } - xbs = d.r.readn(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs, _ = d.decodeBytes(nil) + if d.br { + xbs = d.r.readx(uint(l)) + } else { + xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, true) default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + return } d.bdRead = false return } -func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool switch d.bd { case simpleVdNil: - vt = valueTypeNil + n.v = valueTypeNil case simpleVdFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case simpleVdTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - vt = valueTypeUint - ui, _, _ := d.decIntAny() - v = ui + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - vt = valueTypeInt - _, i, _ := d.decIntAny() - v = i + n.v = valueTypeInt + n.i = d.DecodeInt64() case simpleVdFloat32: - vt = valueTypeFloat - v = d.decodeFloat(true) + n.v = valueTypeFloat + n.f = d.DecodeFloat64() case simpleVdFloat64: - vt = valueTypeFloat - v = d.decodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - vt = valueTypeString - v = d.decodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdTime: + n.v = valueTypeTime + n.t = d.DecodeTime() + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - vt = valueTypeExt + n.v = valueTypeExt l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - vt = valueTypeArray + n.u = uint64(d.r.readn1()) + if d.br { + n.l = d.r.readx(uint(l)) + } else { + n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, + simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray decodeFurther = true case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) } if !decodeFurther { d.bdRead = false } - return } //------------------------------------ @@ -422,12 +607,12 @@ func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurt // SimpleHandle is a Handle for a very simple encoding format. // // simple is a simplistic codec similar to binc, but not as compact. 
-// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - Encoding of a value is always preceded by the descriptor byte (bd) // - True, false, nil are encoded fully in 1 byte (the descriptor) // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. // - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) +// - Length of containers (strings, bytes, array, map, extensions) // are encoded in 0, 1, 2, 4 or 8 bytes. // Zero-length containers have no length encoded. // For others, the number of bytes is given by pow(2, bd%3) @@ -435,26 +620,46 @@ func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurt // - arrays are encoded as [bd] [length] [value]... // - extensions are encoded as [bd] [length] [tag] [byte]... // - strings/bytearrays are encoded as [bd] [length] [byte]... +// - time.Time are encoded as [bd] [length] [byte]... // // The full spec will be published soon. type SimpleHandle struct { BasicHandle + binaryEncodingType + noElemSeparators + // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil + EncZeroValuesAsNil bool + + // _ [1]uint64 // padding } -func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { - return &simpleEncDriver{w: w, h: h} +// Name returns the name of the handle: simple +func (h *SimpleHandle) Name() string { return "simple" } + +// SetBytesExt sets an extension +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} } -func (h *SimpleHandle) newDecDriver(r decReader) decDriver { - return &simpleDecDriver{r: r, h: h} +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (_ *SimpleHandle) writeExt() bool { - return true +func (e *simpleEncDriver) reset() { + e.c = 0 + e.w = e.e.w } -func (h *SimpleHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle +func (d *simpleDecDriver) reset() { + d.c = 0 + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false } var _ decDriver = (*simpleDecDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json b/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json new file mode 100644 index 00000000000..9028586711e --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json @@ -0,0 +1,639 @@ +[ + { + "cbor": "AA==", + "hex": "00", + "roundtrip": true, + "decoded": 0 + }, + { + "cbor": "AQ==", + "hex": "01", + "roundtrip": true, + "decoded": 1 + }, + { + "cbor": "Cg==", + "hex": "0a", + "roundtrip": true, + "decoded": 10 + }, + { + "cbor": "Fw==", + "hex": "17", + "roundtrip": true, + "decoded": 23 + }, + { + "cbor": "GBg=", + "hex": "1818", + "roundtrip": true, + "decoded": 24 + }, + { + "cbor": "GBk=", + "hex": "1819", + "roundtrip": true, + "decoded": 25 + }, + { + "cbor": "GGQ=", + "hex": "1864", + "roundtrip": true, + "decoded": 100 + }, + { + "cbor": "GQPo", + "hex": "1903e8", + "roundtrip": true, + "decoded": 1000 + }, + { + "cbor": "GgAPQkA=", + "hex": "1a000f4240", + 
"roundtrip": true, + "decoded": 1000000 + }, + { + "cbor": "GwAAAOjUpRAA", + "hex": "1b000000e8d4a51000", + "roundtrip": true, + "decoded": 1000000000000 + }, + { + "cbor": "G///////////", + "hex": "1bffffffffffffffff", + "roundtrip": true, + "decoded": 18446744073709551615 + }, + { + "cbor": "wkkBAAAAAAAAAAA=", + "hex": "c249010000000000000000", + "roundtrip": true, + "decoded": 18446744073709551616 + }, + { + "cbor": "O///////////", + "hex": "3bffffffffffffffff", + "roundtrip": true, + "decoded": -18446744073709551616, + "skip": true + }, + { + "cbor": "w0kBAAAAAAAAAAA=", + "hex": "c349010000000000000000", + "roundtrip": true, + "decoded": -18446744073709551617 + }, + { + "cbor": "IA==", + "hex": "20", + "roundtrip": true, + "decoded": -1 + }, + { + "cbor": "KQ==", + "hex": "29", + "roundtrip": true, + "decoded": -10 + }, + { + "cbor": "OGM=", + "hex": "3863", + "roundtrip": true, + "decoded": -100 + }, + { + "cbor": "OQPn", + "hex": "3903e7", + "roundtrip": true, + "decoded": -1000 + }, + { + "cbor": "+QAA", + "hex": "f90000", + "roundtrip": true, + "decoded": 0.0 + }, + { + "cbor": "+YAA", + "hex": "f98000", + "roundtrip": true, + "decoded": -0.0 + }, + { + "cbor": "+TwA", + "hex": "f93c00", + "roundtrip": true, + "decoded": 1.0 + }, + { + "cbor": "+z/xmZmZmZma", + "hex": "fb3ff199999999999a", + "roundtrip": true, + "decoded": 1.1 + }, + { + "cbor": "+T4A", + "hex": "f93e00", + "roundtrip": true, + "decoded": 1.5 + }, + { + "cbor": "+Xv/", + "hex": "f97bff", + "roundtrip": true, + "decoded": 65504.0 + }, + { + "cbor": "+kfDUAA=", + "hex": "fa47c35000", + "roundtrip": true, + "decoded": 100000.0 + }, + { + "cbor": "+n9///8=", + "hex": "fa7f7fffff", + "roundtrip": true, + "decoded": 3.4028234663852886e+38 + }, + { + "cbor": "+3435DyIAHWc", + "hex": "fb7e37e43c8800759c", + "roundtrip": true, + "decoded": 1.0e+300 + }, + { + "cbor": "+QAB", + "hex": "f90001", + "roundtrip": true, + "decoded": 5.960464477539063e-08 + }, + { + "cbor": "+QQA", + "hex": "f90400", + "roundtrip": true, + "decoded": 6.103515625e-05 + }, + { + "cbor": "+cQA", + "hex": "f9c400", + "roundtrip": true, + "decoded": -4.0 + }, + { + "cbor": "+8AQZmZmZmZm", + "hex": "fbc010666666666666", + "roundtrip": true, + "decoded": -4.1 + }, + { + "cbor": "+XwA", + "hex": "f97c00", + "roundtrip": true, + "diagnostic": "Infinity" + }, + { + "cbor": "+X4A", + "hex": "f97e00", + "roundtrip": true, + "diagnostic": "NaN" + }, + { + "cbor": "+fwA", + "hex": "f9fc00", + "roundtrip": true, + "diagnostic": "-Infinity" + }, + { + "cbor": "+n+AAAA=", + "hex": "fa7f800000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+n/AAAA=", + "hex": "fa7fc00000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+v+AAAA=", + "hex": "faff800000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "+3/wAAAAAAAA", + "hex": "fb7ff0000000000000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+3/4AAAAAAAA", + "hex": "fb7ff8000000000000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+//wAAAAAAAA", + "hex": "fbfff0000000000000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "9A==", + "hex": "f4", + "roundtrip": true, + "decoded": false + }, + { + "cbor": "9Q==", + "hex": "f5", + "roundtrip": true, + "decoded": true + }, + { + "cbor": "9g==", + "hex": "f6", + "roundtrip": true, + "decoded": null + }, + { + "cbor": "9w==", + "hex": "f7", + "roundtrip": true, + "diagnostic": "undefined" + }, + { + "cbor": "8A==", + "hex": "f0", + "roundtrip": 
true, + "diagnostic": "simple(16)" + }, + { + "cbor": "+Bg=", + "hex": "f818", + "roundtrip": true, + "diagnostic": "simple(24)" + }, + { + "cbor": "+P8=", + "hex": "f8ff", + "roundtrip": true, + "diagnostic": "simple(255)" + }, + { + "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", + "hex": "c074323031332d30332d32315432303a30343a30305a", + "roundtrip": true, + "diagnostic": "0(\"2013-03-21T20:04:00Z\")" + }, + { + "cbor": "wRpRS2ew", + "hex": "c11a514b67b0", + "roundtrip": true, + "diagnostic": "1(1363896240)" + }, + { + "cbor": "wftB1FLZ7CAAAA==", + "hex": "c1fb41d452d9ec200000", + "roundtrip": true, + "diagnostic": "1(1363896240.5)" + }, + { + "cbor": "10QBAgME", + "hex": "d74401020304", + "roundtrip": true, + "diagnostic": "23(h'01020304')" + }, + { + "cbor": "2BhFZElFVEY=", + "hex": "d818456449455446", + "roundtrip": true, + "diagnostic": "24(h'6449455446')" + }, + { + "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", + "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", + "roundtrip": true, + "diagnostic": "32(\"http://www.example.com\")" + }, + { + "cbor": "QA==", + "hex": "40", + "roundtrip": true, + "diagnostic": "h''" + }, + { + "cbor": "RAECAwQ=", + "hex": "4401020304", + "roundtrip": true, + "diagnostic": "h'01020304'" + }, + { + "cbor": "YA==", + "hex": "60", + "roundtrip": true, + "decoded": "" + }, + { + "cbor": "YWE=", + "hex": "6161", + "roundtrip": true, + "decoded": "a" + }, + { + "cbor": "ZElFVEY=", + "hex": "6449455446", + "roundtrip": true, + "decoded": "IETF" + }, + { + "cbor": "YiJc", + "hex": "62225c", + "roundtrip": true, + "decoded": "\"\\" + }, + { + "cbor": "YsO8", + "hex": "62c3bc", + "roundtrip": true, + "decoded": "ü" + }, + { + "cbor": "Y+awtA==", + "hex": "63e6b0b4", + "roundtrip": true, + "decoded": "æ°´" + }, + { + "cbor": "ZPCQhZE=", + "hex": "64f0908591", + "roundtrip": true, + "decoded": "ð…‘" + }, + { + "cbor": "gA==", + "hex": "80", + "roundtrip": true, + "decoded": [ + + ] + }, + { + "cbor": "gwECAw==", + "hex": "83010203", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3 + ] + }, + { + "cbor": "gwGCAgOCBAU=", + "hex": "8301820203820405", + "roundtrip": true, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", + "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "oA==", + "hex": "a0", + "roundtrip": true, + "decoded": { + } + }, + { + "cbor": "ogECAwQ=", + "hex": "a201020304", + "roundtrip": true, + "skip": true, + "diagnostic": "{1: 2, 3: 4}" + }, + { + "cbor": "omFhAWFiggID", + "hex": "a26161016162820203", + "roundtrip": true, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhoWFiYWM=", + "hex": "826161a161626163", + "roundtrip": true, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", + "hex": "a56161614161626142616361436164614461656145", + "roundtrip": true, + "decoded": { + "a": "A", + "b": "B", + "c": "C", + "d": "D", + "e": "E" + } + }, + { + "cbor": "X0IBAkMDBAX/", + "hex": "5f42010243030405ff", + "roundtrip": false, + "skip": true, + "diagnostic": "(_ h'0102', h'030405')" + }, + { + "cbor": "f2VzdHJlYWRtaW5n/w==", + "hex": "7f657374726561646d696e67ff", + "roundtrip": false, + "decoded": "streaming" + }, + { + "cbor": "n/8=", + "hex": "9fff", + "roundtrip": false, + "decoded": [ + + ] + 
}, + { + "cbor": "nwGCAgOfBAX//w==", + "hex": "9f018202039f0405ffff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwGCAgOCBAX/", + "hex": "9f01820203820405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGCAgOfBAX/", + "hex": "83018202039f0405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGfAgP/ggQF", + "hex": "83019f0203ff820405", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", + "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", + "roundtrip": false, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "v2FhAWFinwID//8=", + "hex": "bf61610161629f0203ffff", + "roundtrip": false, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhv2FiYWP/", + "hex": "826161bf61626163ff", + "roundtrip": false, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "v2NGdW71Y0FtdCH/", + "hex": "bf6346756ef563416d7421ff", + "roundtrip": false, + "decoded": { + "Fun": true, + "Amt": -2 + } + } +] diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/test.py similarity index 69% rename from vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py rename to vendor/github.com/hashicorp/go-msgpack/codec/test.py index e933838c56a..800376f6841 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py +++ b/vendor/github.com/hashicorp/go-msgpack/codec/test.py @@ -4,7 +4,14 @@ # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). 
-import msgpack, msgpackrpc, sys, os, threading +# Ensure msgpack-python and cbor are installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python cbor + +# Ensure all "string" keys are utf strings (else encoded as bytes) + +import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type @@ -21,43 +28,52 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, + 6464.0, 6464646464.0, False, True, + u"null", None, - "someday", - "", - "bytestring", + u"some&day>some 0 @@ -80,14 +96,14 @@ def myStopRpcServer(): server.start() def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) @@ -102,7 +118,7 @@ def doMain(args): elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: - print("Usage: msgpack_test.py " + + print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go deleted file mode 100644 index c86d65328d7..00000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/time.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. 
-// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. -// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. 
- // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 00000000000..4e309e0b326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 00000000000..da17640e644 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 00000000000..8943becf19b --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,62 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. + +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. + +Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required. +From 0.6.7 onward, Go 1.13+ is required. + +Example Use +=========== + +Using this library should look almost identical to what you would do with +`net/http`. The most simple example of a GET request is shown below: + +```go +resp, err := retryablehttp.Get("/foo") +if err != nil { + panic(err) +} +``` + +The returned response object is an `*http.Response`, the same thing you would +usually get from `net/http`. Had the request failed one or more times, the above +call would block and retry with exponential backoff. + +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort. Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + +For more usage and examples see the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). 
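The vendored `client.go` introduced next exposes the knobs the README above refers to (`RetryMax`, `RetryWaitMin`/`RetryWaitMax`, `Backoff`, `CheckRetry`). As a rough, non-authoritative sketch of how a caller might wire those options together, using only identifiers defined in the vendored sources below (the endpoint URL is a placeholder, not taken from this change):

```go
// Illustrative sketch only; not part of the vendored go-retryablehttp sources.
// The URL below is a placeholder.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()
	client.RetryMax = 5                          // give up after 5 retries
	client.RetryWaitMin = 500 * time.Millisecond // lower bound for backoff
	client.RetryWaitMax = 10 * time.Second       // upper bound for backoff

	// Use jittered linear backoff instead of the default exponential policy,
	// and propagate the reason a retry was attempted in the returned error.
	client.Backoff = retryablehttp.LinearJitterBackoff
	client.CheckRetry = retryablehttp.ErrorPropagatedRetryPolicy

	req, err := retryablehttp.NewRequest("GET", "https://example.com/health", nil)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```

The same configuration can be hidden behind a stdlib `*http.Client` via `client.StandardClient()` (shown in the README), which delegates to the `RoundTripper` added in `roundtripper.go` later in this change.
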
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go new file mode 100644 index 00000000000..f40d2411cf1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -0,0 +1,822 @@ +// Package retryablehttp provides a familiar HTTP client interface with +// automatic retries and exponential backoff. It is a thin wrapper over the +// standard net/http client library and exposes nearly the same public API. +// This makes retryablehttp very easy to drop into existing programs. +// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors etc), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. 
+ notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. +// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the +// CheckRetry function indicates that a retry of the base request is not necessary. +// If an error is returned from this function, the CheckRetry policy will be used to determine +// whether to retry the whole request (including this handler). +// +// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code. +// +// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or +// by the caller out-of-band. Failure to do so will result in a memory leak. +type ResponseHandlerFunc func(*http.Response) error + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + responseHandler ResponseHandlerFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + return &Request{ + body: r.body, + responseHandler: r.responseHandler, + Request: r.Request.WithContext(ctx), + } +} + +// SetResponseHandler allows setting the response handler. +func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { + r.responseHandler = fn +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. 
+func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. + case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{body: bodyReader, Request: r}, nil +} + +// NewRequest creates a new wrapped request. +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + return NewRequestWithContext(context.Background(), method, url, rawBody) +} + +// NewRequestWithContext creates a new wrapped request with the provided context. +// +// The context controls the entire lifetime of a request and its response: +// obtaining a connection, sending the request, and reading the response headers and body. 
+func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return nil, err + } + + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + return nil, err + } + httpReq.ContentLength = contentLength + + return &Request{body: bodyReader, Request: httpReq}, nil +} + +// Logger interface allows to use other loggers than +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LeveledLogger is an interface that can be implemented by any logger or a +// logger wrapper to provide leveled logging. The methods accept a message +// string and a variadic number of key-value pairs. For log.Printf style +// formatting where message string contains a format specifier, use Logger +// interface. +type LeveledLogger interface { + Error(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + Warn(msg string, keysAndValues ...interface{}) +} + +// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + LeveledLogger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.Info(fmt.Sprintf(s, args...)) +} + +// RequestLogHook allows a function to run before each retry. The HTTP +// request which will be made, and the retry number (0 for the initial +// request) are available to users. The internal logger is exposed to +// consumers. +type RequestLogHook func(Logger, *http.Request, int) + +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckRetry callback to properly close any +// response body before returning. +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. +type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) + +// Client is used to make HTTP requests. 
It adds additional functionality +// like automatic retries to tolerate minor outages. +type Client struct { + HTTPClient *http.Client // Internal HTTP client. + Logger interface{} // Customer logger instance. Can be either Logger or LeveledLogger + + RetryWaitMin time.Duration // Minimum time to wait + RetryWaitMax time.Duration // Maximum time to wait + RetryMax int // Maximum number of retries + + // RequestLogHook allows a user-supplied function to be called + // before each retry. + RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler + + loggerInit sync.Once + clientInit sync.Once +} + +// NewClient creates a new Client with default settings. +func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultPooledClient(), + Logger: defaultLogger, + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger, LeveledLogger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) + } + }) + + return c.Logger +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + // don't propagate other errors + shouldRetry, _ := baseRetryPolicy(resp, err) + return shouldRetry, nil +} + +// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it +// propagates errors back instead of returning nil. This allows you to inspect +// why it decided to retry or not. +func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + return baseRetryPolicy(resp, err) +} + +func baseRetryPolicy(resp *http.Response, err error) (bool, error) { + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to TLS cert verification failure. + if notTrustedErrorRe.MatchString(v.Error()) { + return false, v + } + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, v + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // 429 Too Many Requests is recoverable. 
Sometimes the server puts + // a Retry-After response header to indicate when the server is + // available to start processing request from client. + if resp.StatusCode == http.StatusTooManyRequests { + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { + return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +// +// It also tries to parse Retry-After response header when a http.StatusTooManyRequests +// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of +// seconds the server states it may be ready to process more requests from this client. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + if resp != nil { + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + if s, ok := resp.Header["Retry-After"]; ok { + if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil { + return time.Second * time.Duration(sleep) + } + } + } + } + + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. 
+ jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + c.clientInit.Do(func() { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + }) + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + } + + var resp *http.Response + var attempt int + var shouldRetry bool + var doErr, respErr, checkErr error + + for i := 0; ; i++ { + doErr, respErr = nil, nil + attempt++ + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + case Logger: + c.RequestLogHook(v, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, doErr = c.HTTPClient.Do(req.Request) + + // Check if we should continue with retries. + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } + + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + case Logger: + c.ResponseLogHook(v, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + if !shouldRetry { + break + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. 
+ if doErr == nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } + switch v := logger.(type) { + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } + } + timer := time.NewTimer(wait) + select { + case <-req.Context().Done(): + timer.Stop() + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-timer.C: + } + + // Make shallow copy of http Request so that we can modify its body + // without racing against the closeBody call in persistConn.writeLoop. + httpreq := *req.Request + req.Request = &httpreq + } + + // this is the closest we have to success criteria + if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { + return resp, nil + } + + defer c.HTTPClient.CloseIdleConnections() + + var err error + if checkErr != nil { + err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr + } + + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, attempt) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + c.drainBody(resp.Body) + } + + // this means CheckRetry thought the request was a failure, but didn't + // communicate why + if err == nil { + return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", + req.Method, req.URL, attempt) + } + + return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", + req.Method, req.URL, attempt, err) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case LeveledLogger: + v.Error("error reading response body", "error", err) + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. 
+func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 00000000000..8f3ee358427 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,52 @@ +package retryablehttp + +import ( + "errors" + "net/http" + "net/url" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + resp, err := rt.Client.Do(retryableReq) + // If we got an error returned by standard library's `Do` method, unwrap it + // otherwise we will wind up erroneously re-nesting the error. + if _, ok := err.(*url.Error); ok { + return resp, errors.Unwrap(err) + } + + return resp, err +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go new file mode 100644 index 00000000000..d59ecbb2b79 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go @@ -0,0 +1,65 @@ +package parseutil + +import ( + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "strings" +) + +var ( + ErrNotAUrl = errors.New("not a url") + ErrNotParsed = errors.New("not a parsed value") +) + +// ParsePath parses a URL with schemes file://, env://, or any other. Depending +// on the scheme it will return specific types of data: +// +// * file:// will return a string with the file's contents +// +// * env:// will return a string with the env var's contents +// +// * Anything else will return the string as it was. Functionally this means +// anything for which Go's `url.Parse` function does not throw an error. If you +// want to ensure that this function errors if a known scheme is not found, use +// MustParsePath. +// +// On error, we return the original string along with the error. The caller can +// switch on errors.Is(err, ErrNotAUrl) to understand whether it was the parsing +// step that errored or something else (such as a file not found). This is +// useful to attempt to read a non-URL string from some resource, but where the +// original input may simply be a valid string of that type. +func ParsePath(path string) (string, error) { + return parsePath(path, false) +} + +// MustParsePath behaves like ParsePath but will return ErrNotAUrl if the value +// is not a URL with a scheme that can be parsed by this function. 
+func MustParsePath(path string) (string, error) { + return parsePath(path, true) +} + +func parsePath(path string, mustParse bool) (string, error) { + path = strings.TrimSpace(path) + parsed, err := url.Parse(path) + if err != nil { + return path, fmt.Errorf("error parsing url (%q): %w", err.Error(), ErrNotAUrl) + } + switch parsed.Scheme { + case "file": + contents, err := ioutil.ReadFile(strings.TrimPrefix(path, "file://")) + if err != nil { + return path, fmt.Errorf("error reading file at %s: %w", path, err) + } + return strings.TrimSpace(string(contents)), nil + case "env": + return strings.TrimSpace(os.Getenv(strings.TrimPrefix(path, "env://"))), nil + default: + if mustParse { + return "", ErrNotParsed + } + return path, nil + } +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go new file mode 100644 index 00000000000..e469499bdcd --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go @@ -0,0 +1,502 @@ +package parseutil + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/mitchellh/mapstructure" +) + +var validCapacityString = regexp.MustCompile("^[\t ]*([0-9]+)[\t ]?([kmgtKMGT][iI]?[bB])?[\t ]*$") + +// ParseCapacityString parses a capacity string and returns the number of bytes it represents. +// Capacity strings are things like 5gib or 10MB. Supported prefixes are kb, kib, mb, mib, gb, +// gib, tb, tib, which are not case sensitive. If no prefix is present, the number is assumed +// to be in bytes already. +func ParseCapacityString(in interface{}) (uint64, error) { + var cap uint64 + + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return cap, nil + } + + matches := validCapacityString.FindStringSubmatch(inp) + + // no sub-groups means we couldn't parse it + if len(matches) <= 1 { + return cap, errors.New("could not parse capacity from input") + } + + var multiplier uint64 = 1 + switch strings.ToLower(matches[2]) { + case "kb": + multiplier = 1000 + case "kib": + multiplier = 1024 + case "mb": + multiplier = 1000 * 1000 + case "mib": + multiplier = 1024 * 1024 + case "gb": + multiplier = 1000 * 1000 * 1000 + case "gib": + multiplier = 1024 * 1024 * 1024 + case "tb": + multiplier = 1000 * 1000 * 1000 * 1000 + case "tib": + multiplier = 1024 * 1024 * 1024 * 1024 + } + + size, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return cap, err + } + + cap = size * multiplier + case int: + cap = uint64(inp) + case int32: + cap = uint64(inp) + case int64: + cap = uint64(inp) + case uint: + cap = uint64(inp) + case uint32: + cap = uint64(inp) + case uint64: + cap = uint64(inp) + case float32: + cap = uint64(inp) + case float64: + cap = uint64(inp) + default: + return cap, errors.New("could not parse capacity from input") + } + + return cap, nil +} + +// Parse a duration from an arbitrary value (a string or numeric value) into +// a time.Duration; when units are missing (such as when a numeric type is +// provided), the duration is assumed to be in seconds. 
+func ParseDurationSecond(in interface{}) (time.Duration, error) { + var dur time.Duration + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return dur, nil + } + + if v, err := strconv.ParseInt(inp, 10, 64); err == nil { + return time.Duration(v) * time.Second, nil + } + + if strings.HasSuffix(inp, "d") { + v, err := strconv.ParseInt(inp[:len(inp)-1], 10, 64) + if err != nil { + return dur, err + } + return time.Duration(v) * 24 * time.Hour, nil + } + + var err error + if dur, err = time.ParseDuration(inp); err != nil { + return dur, err + } + case int: + dur = time.Duration(inp) * time.Second + case int32: + dur = time.Duration(inp) * time.Second + case int64: + dur = time.Duration(inp) * time.Second + case uint: + dur = time.Duration(inp) * time.Second + case uint32: + dur = time.Duration(inp) * time.Second + case uint64: + dur = time.Duration(inp) * time.Second + case float32: + dur = time.Duration(inp) * time.Second + case float64: + dur = time.Duration(inp) * time.Second + case time.Duration: + dur = inp + default: + return 0, errors.New("could not parse duration from input") + } + + return dur, nil +} + +// Parse an absolute timestamp from the provided arbitrary value (string or +// numeric value). When an untyped numeric value is provided, it is assumed +// to be seconds from the Unix Epoch. +func ParseAbsoluteTime(in interface{}) (time.Time, error) { + var t time.Time + switch inp := in.(type) { + case nil: + // return default of zero + return t, nil + case string: + // Allow RFC3339 with nanoseconds, or without, + // or an epoch time as an integer. + var err error + t, err = time.Parse(time.RFC3339Nano, inp) + if err == nil { + break + } + t, err = time.Parse(time.RFC3339, inp) + if err == nil { + break + } + epochTime, err := strconv.ParseInt(inp, 10, 64) + if err == nil { + t = time.Unix(epochTime, 0) + break + } + return t, errors.New("could not parse string as date and time") + case json.Number: + epochTime, err := inp.Int64() + if err != nil { + return t, err + } + t = time.Unix(epochTime, 0) + case int: + t = time.Unix(int64(inp), 0) + case int32: + t = time.Unix(int64(inp), 0) + case int64: + t = time.Unix(inp, 0) + case uint: + t = time.Unix(int64(inp), 0) + case uint32: + t = time.Unix(int64(inp), 0) + case uint64: + t = time.Unix(int64(inp), 0) + default: + return t, errors.New("could not parse time from input type") + } + return t, nil +} + +// ParseInt takes an arbitrary value (either a string or numeric type) and +// parses it as an int64 value. This value is assumed to be larger than the +// provided type, but cannot safely be cast. +// +// When the end value is bounded (such as an int value), it is recommended +// to instead call SafeParseInt or SafeParseIntRange to safely cast to a +// more restrictive type. 
+func ParseInt(in interface{}) (int64, error) { + var ret int64 + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch in.(type) { + case string: + inp := in.(string) + if inp == "" { + return 0, nil + } + var err error + left, err := strconv.ParseInt(inp, 10, 64) + if err != nil { + return ret, err + } + ret = left + case int: + ret = int64(in.(int)) + case int32: + ret = int64(in.(int32)) + case int64: + ret = in.(int64) + case uint: + ret = int64(in.(uint)) + case uint32: + ret = int64(in.(uint32)) + case uint64: + ret = int64(in.(uint64)) + default: + return 0, errors.New("could not parse value from input") + } + + return ret, nil +} + +// ParseDirectIntSlice behaves similarly to ParseInt, but accepts typed +// slices, returning a slice of int64s. +// +// If the starting value may not be in slice form (e.g.. a bare numeric value +// could be provided), it is suggested to call ParseIntSlice instead. +func ParseDirectIntSlice(in interface{}) ([]int64, error) { + var ret []int64 + + switch in.(type) { + case []int: + for _, v := range in.([]int) { + ret = append(ret, int64(v)) + } + case []int32: + for _, v := range in.([]int32) { + ret = append(ret, int64(v)) + } + case []int64: + // For consistency to ensure callers can always modify ret without + // impacting in. + for _, v := range in.([]int64) { + ret = append(ret, v) + } + case []uint: + for _, v := range in.([]uint) { + ret = append(ret, int64(v)) + } + case []uint32: + for _, v := range in.([]uint32) { + ret = append(ret, int64(v)) + } + case []uint64: + for _, v := range in.([]uint64) { + ret = append(ret, int64(v)) + } + case []json.Number: + for _, v := range in.([]json.Number) { + element, err := ParseInt(v) + if err != nil { + return nil, err + } + ret = append(ret, element) + } + case []string: + for _, v := range in.([]string) { + element, err := ParseInt(v) + if err != nil { + return nil, err + } + ret = append(ret, element) + } + default: + return nil, errors.New("could not parse value from input") + } + + return ret, nil +} + +// ParseIntSlice is a helper function for handling upgrades of optional +// slices; that is, if the API accepts a type similar to , +// nicely handle the common cases of providing only an int-ish, providing +// an actual slice of int-ishes, or providing a comma-separated list of +// numbers. +// +// When []int64 is not the desired final type (or the values should be +// range-bound), it is suggested to call SafeParseIntSlice or +// SafeParseIntSliceRange instead. +func ParseIntSlice(in interface{}) ([]int64, error) { + if ret, err := ParseInt(in); err == nil { + return []int64{ret}, nil + } + + if ret, err := ParseDirectIntSlice(in); err == nil { + return ret, nil + } + + if strings, err := ParseCommaStringSlice(in); err == nil { + var ret []int64 + for _, v := range strings { + if v == "" { + // Ignore empty fields + continue + } + + element, err := ParseInt(v) + if err != nil { + return nil, err + } + ret = append(ret, element) + } + + return ret, nil + } + + return nil, errors.New("could not parse value from input") +} + +// Parses the provided arbitrary value as a boolean-like value. +func ParseBool(in interface{}) (bool, error) { + var result bool + if err := mapstructure.WeakDecode(in, &result); err != nil { + return false, err + } + return result, nil +} + +// Parses the provided arbitrary value as a string. 
+func ParseString(in interface{}) (string, error) { + var result string + if err := mapstructure.WeakDecode(in, &result); err != nil { + return "", err + } + return result, nil +} + +// Parses the provided string-like value as a comma-separated list of values. +func ParseCommaStringSlice(in interface{}) ([]string, error) { + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + + rawString, ok := in.(string) + if ok && rawString == "" { + return []string{}, nil + } + var result []string + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + if err := decoder.Decode(in); err != nil { + return nil, err + } + return strutil.TrimStrings(result), nil +} + +// Parses the specified value as one or more addresses, separated by commas. +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + out := make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return nil, fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing %v as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + } + + default: + return nil, fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return nil, fmt.Errorf("error parsing address %q: %w", addr, err) + } + out = append(out, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return out, nil +} + +// Parses the provided arbitrary value (see ParseInt), ensuring it is within +// the specified range (inclusive of bounds). If this range corresponds to a +// smaller type, the returned value can then be safely cast without risking +// overflow. +func SafeParseIntRange(in interface{}, min int64, max int64) (int64, error) { + raw, err := ParseInt(in) + if err != nil { + return 0, err + } + + if raw < min || raw > max { + return 0, fmt.Errorf("error parsing int value; out of range [%v to %v]: %v", min, max, raw) + } + + return raw, nil +} + +// Parses the specified arbitrary value (see ParseInt), ensuring that the +// resulting value is within the range for an int value. If no error occurred, +// the caller knows no overflow occurred. +func SafeParseInt(in interface{}) (int, error) { + raw, err := SafeParseIntRange(in, math.MinInt, math.MaxInt) + return int(raw), err +} + +// Parses the provided arbitrary value (see ParseIntSlice) into a slice of +// int64 values, ensuring each is within the specified range (inclusive of +// bounds). If this range corresponds to a smaller type, the returned value +// can then be safely cast without risking overflow. +// +// If elements is positive, it is used to ensure the resulting slice is +// bounded above by that many number of elements (inclusive). 
+func SafeParseIntSliceRange(in interface{}, minValue int64, maxValue int64, elements int) ([]int64, error) { + raw, err := ParseIntSlice(in) + if err != nil { + return nil, err + } + + if elements > 0 && len(raw) > elements { + return nil, fmt.Errorf("error parsing value from input: got %v but expected at most %v elements", len(raw), elements) + } + + for index, value := range raw { + if value < minValue || value > maxValue { + return nil, fmt.Errorf("error parsing value from input: element %v was outside of range [%v to %v]: %v", index, minValue, maxValue, value) + } + } + + return raw, nil +} + +// Parses the provided arbitrary value (see ParseIntSlice) into a slice of +// int values, ensuring the each resulting value in the slice is within the +// range for an int value. If no error occurred, the caller knows no overflow +// occurred. +// +// If elements is positive, it is used to ensure the resulting slice is +// bounded above by that many number of elements (inclusive). +func SafeParseIntSlice(in interface{}, elements int) ([]int, error) { + raw, err := SafeParseIntSliceRange(in, math.MinInt, math.MaxInt, elements) + if err != nil || raw == nil { + return nil, err + } + + var result = make([]int, 0, len(raw)) + for _, element := range raw { + result = append(result, int(element)) + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go new file mode 100644 index 00000000000..102462dc60e --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go @@ -0,0 +1,510 @@ +package strutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + "unicode" + + glob "github.com/ryanuber/go-glob" +) + +// StrListContainsGlob looks for a string in a list of strings and allows +// globs. +func StrListContainsGlob(haystack []string, needle string) bool { + for _, item := range haystack { + if glob.Glob(item, needle) { + return true + } + } + return false +} + +// StrListContains looks for a string in a list of strings. +func StrListContains(haystack []string, needle string) bool { + for _, item := range haystack { + if item == needle { + return true + } + } + return false +} + +// StrListContainsCaseInsensitive looks for a string in a list of strings. +func StrListContainsCaseInsensitive(haystack []string, needle string) bool { + for _, item := range haystack { + if strings.EqualFold(item, needle) { + return true + } + } + return false +} + +// StrListSubset checks if a given list is a subset +// of another set +func StrListSubset(super, sub []string) bool { + for _, item := range sub { + if !StrListContains(super, item) { + return false + } + } + return true +} + +// ParseDedupAndSortStrings parses a comma separated list of strings +// into a slice of strings. The return slice will be sorted and will +// not contain duplicate or empty items. +func ParseDedupAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), false) +} + +// ParseDedupLowercaseAndSortStrings parses a comma separated list of +// strings into a slice of strings. The return slice will be sorted and +// will not contain duplicate or empty items. The values will be converted +// to lower case. +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), true) +} + +// ParseKeyValues parses a comma separated list of `=` tuples +// into a map[string]string. +func ParseKeyValues(input string, out map[string]string, sep string) error { + if out == nil { + return fmt.Errorf("'out is nil") + } + + keyValues := ParseDedupLowercaseAndSortStrings(input, sep) + if len(keyValues) == 0 { + return nil + } + + for _, keyValue := range keyValues { + shards := strings.Split(keyValue, "=") + if len(shards) != 2 { + return fmt.Errorf("invalid format") + } + + key := strings.TrimSpace(shards[0]) + value := strings.TrimSpace(shards[1]) + if key == "" || value == "" { + return fmt.Errorf("invalid pair: key: %q value: %q", key, value) + } + out[key] = value + } + return nil +} + +// ParseArbitraryKeyValues parses arbitrary tuples. 
The input +// can be one of the following: +// * JSON string +// * Base64 encoded JSON string +// * Comma separated list of `=` pairs +// * Base64 encoded string containing comma separated list of +// `=` pairs +// +// Input will be parsed into the output parameter, which should +// be a non-nil map[string]string. +func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { + input = strings.TrimSpace(input) + if input == "" { + return nil + } + if out == nil { + return fmt.Errorf("'out' is nil") + } + + // Try to base64 decode the input. If successful, consider the decoded + // value as input. + inputBytes, err := base64.StdEncoding.DecodeString(input) + if err == nil { + input = string(inputBytes) + } + + // Try to JSON unmarshal the input. If successful, consider that the + // metadata was supplied as JSON input. + err = json.Unmarshal([]byte(input), &out) + if err != nil { + // If JSON unmarshaling fails, consider that the input was + // supplied as a comma separated string of 'key=value' pairs. + if err = ParseKeyValues(input, out, sep); err != nil { + return fmt.Errorf("failed to parse the input: %w", err) + } + } + + // Validate the parsed input + for key, value := range out { + if key != "" && value == "" { + return fmt.Errorf("invalid value for key %q", key) + } + } + + return nil +} + +// ParseStringSlice parses a `sep`-separated list of strings into a +// []string with surrounding whitespace removed. +// +// The output will always be a valid slice but may be of length zero. +func ParseStringSlice(input string, sep string) []string { + input = strings.TrimSpace(input) + if input == "" { + return []string{} + } + + splitStr := strings.Split(input, sep) + ret := make([]string, len(splitStr)) + for i, val := range splitStr { + ret[i] = strings.TrimSpace(val) + } + + return ret +} + +// ParseArbitraryStringSlice parses arbitrary string slice. The input +// can be one of the following: +// * JSON string +// * Base64 encoded JSON string +// * `sep` separated list of values +// * Base64-encoded string containing a `sep` separated list of values +// +// Note that the separator is ignored if the input is found to already be in a +// structured format (e.g., JSON) +// +// The output will always be a valid slice but may be of length zero. +func ParseArbitraryStringSlice(input string, sep string) []string { + input = strings.TrimSpace(input) + if input == "" { + return []string{} + } + + // Try to base64 decode the input. If successful, consider the decoded + // value as input. + inputBytes, err := base64.StdEncoding.DecodeString(input) + if err == nil { + input = string(inputBytes) + } + + ret := []string{} + + // Try to JSON unmarshal the input. If successful, consider that the + // metadata was supplied as JSON input. + err = json.Unmarshal([]byte(input), &ret) + if err != nil { + // If JSON unmarshaling fails, consider that the input was + // supplied as a separated string of values. + return ParseStringSlice(input, sep) + } + + if ret == nil { + return []string{} + } + + return ret +} + +// TrimStrings takes a slice of strings and returns a slice of strings +// with trimmed spaces +func TrimStrings(items []string) []string { + ret := make([]string, len(items)) + for i, item := range items { + ret[i] = strings.TrimSpace(item) + } + return ret +} + +// RemoveDuplicates removes duplicate and empty elements from a slice of +// strings. This also may convert the items in the slice to lower case and +// returns a sorted slice. 
+func RemoveDuplicates(items []string, lowercase bool) []string { + itemsMap := make(map[string]struct{}, len(items)) + for _, item := range items { + item = strings.TrimSpace(item) + if item == "" { + continue + } + if lowercase { + item = strings.ToLower(item) + } + itemsMap[item] = struct{}{} + } + items = make([]string, 0, len(itemsMap)) + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// RemoveDuplicatesStable removes duplicate and empty elements from a slice of +// strings, preserving order (and case) of the original slice. +// In all cases, strings are compared after trimming whitespace +// If caseInsensitive, strings will be compared after ToLower() +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + itemsMap := make(map[string]struct{}, len(items)) + deduplicated := make([]string, 0, len(items)) + + for _, item := range items { + key := strings.TrimSpace(item) + if _, ok := itemsMap[key]; ok || key == "" { + continue + } + if caseInsensitive { + key = strings.ToLower(key) + } + if _, ok := itemsMap[key]; ok { + continue + } + itemsMap[key] = struct{}{} + deduplicated = append(deduplicated, item) + } + return deduplicated +} + +// RemoveEmpty removes empty elements from a slice of +// strings +func RemoveEmpty(items []string) []string { + if len(items) == 0 { + return items + } + itemsSlice := make([]string, 0, len(items)) + for _, item := range items { + if item == "" { + continue + } + itemsSlice = append(itemsSlice, item) + } + return itemsSlice +} + +// EquivalentSlices checks whether the given string sets are equivalent, as in, +// they contain the same values. +func EquivalentSlices(a, b []string) bool { + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + // First we'll build maps to ensure unique values + mapA := make(map[string]struct{}, len(a)) + mapB := make(map[string]struct{}, len(b)) + for _, keyA := range a { + mapA[keyA] = struct{}{} + } + for _, keyB := range b { + mapB[keyB] = struct{}{} + } + + // Now we'll build our checking slices + sortedA := make([]string, 0, len(mapA)) + sortedB := make([]string, 0, len(mapB)) + for keyA := range mapA { + sortedA = append(sortedA, keyA) + } + for keyB := range mapB { + sortedB = append(sortedB, keyB) + } + sort.Strings(sortedA) + sort.Strings(sortedB) + + // Finally, compare + if len(sortedA) != len(sortedB) { + return false + } + + for i := range sortedA { + if sortedA[i] != sortedB[i] { + return false + } + } + + return true +} + +// EqualStringMaps tests whether two map[string]string objects are equal. +// Equal means both maps have the same sets of keys and values. This function +// is 6-10x faster than a call to reflect.DeepEqual(). +func EqualStringMaps(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + + for k := range a { + v, ok := b[k] + if !ok || a[k] != v { + return false + } + } + + return true +} + +// StrListDelete removes the first occurrence of the given item from the slice +// of strings if the item exists. +func StrListDelete(s []string, d string) []string { + if s == nil { + return s + } + + for index, element := range s { + if element == d { + return append(s[:index], s[index+1:]...) + } + } + + return s +} + +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. 
+func GlobbedStringsMatch(item, val string) bool { + if len(item) < 2 { + return val == item + } + + hasPrefix := strings.HasPrefix(item, "*") + hasSuffix := strings.HasSuffix(item, "*") + + if hasPrefix && hasSuffix { + return strings.Contains(val, item[1:len(item)-1]) + } else if hasPrefix { + return strings.HasSuffix(val, item[1:]) + } else if hasSuffix { + return strings.HasPrefix(val, item[:len(item)-1]) + } + + return val == item +} + +// AppendIfMissing adds a string to a slice if the given string is not present +func AppendIfMissing(slice []string, i string) []string { + if StrListContains(slice, i) { + return slice + } + return append(slice, i) +} + +// MergeSlices adds an arbitrary number of slices together, uniquely +func MergeSlices(args ...[]string) []string { + all := map[string]struct{}{} + for _, slice := range args { + for _, v := range slice { + all[v] = struct{}{} + } + } + + result := make([]string, 0, len(all)) + for k := range all { + result = append(result, k) + } + sort.Strings(result) + return result +} + +// Difference returns the set difference (A - B) of the two given slices. The +// result will also remove any duplicated values in set A regardless of whether +// that matches any values in set B. +func Difference(a, b []string, lowercase bool) []string { + if len(a) == 0 { + return a + } + if len(b) == 0 { + if !lowercase { + return a + } + newA := make([]string, len(a)) + for i, v := range a { + newA[i] = strings.ToLower(v) + } + return newA + } + + a = RemoveDuplicates(a, lowercase) + b = RemoveDuplicates(b, lowercase) + + itemsMap := map[string]struct{}{} + for _, aVal := range a { + itemsMap[aVal] = struct{}{} + } + + // Perform difference calculation + for _, bVal := range b { + if _, ok := itemsMap[bVal]; ok { + delete(itemsMap, bVal) + } + } + + items := []string{} + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// GetString attempts to retrieve a value from the provided map and assert that it is a string. If the key does not +// exist in the map, this will return an empty string. If the key exists, but the value is not a string type, this will +// return an error. 
If no map or key is provied, this will return an error +func GetString(m map[string]interface{}, key string) (string, error) { + if m == nil { + return "", fmt.Errorf("missing map") + } + if key == "" { + return "", fmt.Errorf("missing key") + } + + rawVal, ok := m[key] + if !ok { + return "", nil + } + + str, ok := rawVal.(string) + if !ok { + return "", fmt.Errorf("invalid value at %s: is a %T", key, rawVal) + } + return str, nil +} + +// Printable returns true if all characters in the string are printable +// according to Unicode +func Printable(s string) bool { + return strings.IndexFunc(s, func(c rune) bool { + return !unicode.IsPrint(c) + }) == -1 +} + +// StringListToInterfaceList simply takes a []string and turns it into a +// []interface{} to satisfy the input requirements for other library functions +func StringListToInterfaceList(in []string) []interface{} { + ret := make([]interface{}, len(in)) + for i, v := range in { + ret[i] = v + } + return ret +} + +// Reverse reverses the input string +func Reverse(in string) string { + l := len(in) + out := make([]byte, l) + for i := 0; i <= l/2; i++ { + out[i], out[l-1-i] = in[l-1-i], in[i] + } + return string(out) +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 00000000000..15586a2b540 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 00000000000..cb63a32161b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.x + - tip + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor†+ + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version†+ + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution†+ + means Covered Software of a particular Contributor. + +1.4. “Covered Software†+ + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses†+ means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form†+ + means any form of the work other than Source Code Form. + +1.7. “Larger Work†+ + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License†+ + means this document. + +1.9. 
“Licensable†+ + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications†+ + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims†of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License†+ + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form†+ + means the form of the work preferred for making modifications. + +1.14. “You†(or “Yourâ€) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You†includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control†means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is†basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses†Notice + + This Source Code Form is “Incompatible + With Secondary Licensesâ€, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 00000000000..84fd743f5cc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -0,0 +1,18 @@ +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 00000000000..c8223326ddc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -0,0 +1,125 @@ +# HCL + +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + +HCL (HashiCorp Configuration Language) is a configuration language built +by HashiCorp. The goal of HCL is to build a structured configuration language +that is both human and machine friendly for use with command-line tools, but +specifically targeted towards DevOps tools, servers, etc. + +HCL is also fully JSON compatible. That is, JSON can be used as completely +valid input to a system expecting HCL. This helps makes systems +interoperable with other systems. + +HCL is heavily inspired by +[libucl](https://github.com/vstakhov/libucl), +nginx configuration, and others similar. + +## Why? + +A common question when viewing HCL is to ask the question: why not +JSON, YAML, etc.? + +Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) +used a variety of configuration languages from full programming languages +such as Ruby to complete data structure languages such as JSON. What we +learned is that some people wanted human-friendly configuration languages +and some people wanted machine-friendly languages. + +JSON fits a nice balance in this, but is fairly verbose and most +importantly doesn't support comments. With YAML, we found that beginners +had a really hard time determining what the actual structure was, and +ended up guessing more often than not whether to use a hyphen, colon, etc. +in order to represent some configuration key. + +Full programming languages such as Ruby enable complex behavior +a configuration language shouldn't usually allow, and also forces +people to learn some set of Ruby. + +Because of this, we decided to create our own configuration language +that is JSON-compatible. Our configuration language (HCL) is designed +to be written and modified by humans. The API for HCL allows JSON +as an input so that it is also machine-friendly (machines can generate +JSON instead of trying to generate HCL). + +Our goal with HCL is not to alienate other configuration languages. +It is instead to provide HCL as a specialized language for our tools, +and JSON as the interoperability layer. + +## Syntax + +For a complete grammar, please see the parser itself. A high-level overview +of the syntax and grammar is listed here. + + * Single line comments start with `#` or `//` + + * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments + are not allowed. 
A multi-line comment (also known as a block comment) + terminates at the first `*/` found. + + * Values are assigned with the syntax `key = value` (whitespace doesn't + matter). The value can be any primitive: a string, number, boolean, + object, or list. + + * Strings are double-quoted and can contain any UTF-8 characters. + Example: `"Hello, World"` + + * Multi-line strings start with `<- + echo %Path% + + go version + + go env + + go get -t ./... + +build_script: +- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 00000000000..d9a00f21d45 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,769 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. +func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. 
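// (Sketch of the intent, inferred from how d.stack is read in decodeInterface
// further below: the stack records the kinds currently being decoded into, so
// a nested object can tell whether it sits at the root or directly under a
// slice, where it becomes a map[string]interface{}, or deeper, where it is
// decoded as a list of maps.)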
+ if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. 
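// Illustrative example of that rule, assuming decoding into a bare interface{}:
//
//	a = 1          // root object  -> map[string]interface{}{"a": 1}
//	b { c = 2 }    // nested block -> decoded as a list of objects,
//	               //                 e.g. []map[string]interface{}{{"c": 2}}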
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. + if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. 
+ done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. 
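// Illustrative case, assuming the ambiguous-JSON flattening described above:
// the JSON input {"foo": {"bar": {"baz": 1}}} can arrive as a single
// ObjectItem carrying the two keys ["foo", "bar"]; when the target field is a
// struct, expandObject rebuilds the intermediate ObjectType so the item
// decodes as foo -> bar -> { baz = 1 } again.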
+func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. + if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + { + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
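// Illustrative shape of a squashed embedded struct (hypothetical types and
// tag names, using the "hcl" struct tag handled here):
//
//	type Base struct{ Name string `hcl:"name"` }
//	type Config struct {
//	    Base `hcl:",squash"` // Base's fields decode as if declared on Config
//	    Port int `hcl:"port"`
//	}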
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + + // fill unusedNodeKeys with keys from the AST + // a slice because we have to do equals case fold to match Filter + unusedNodeKeys := make(map[string][]token.Pos, 0) + for i, item := range list.Items { + for _, k := range item.Keys { + // isNestedJSON returns true for e.g. bar in + // { "foo": { "bar": {...} } } + // This isn't an unused node key, so we want to skip it + isNestedJSON := i > 0 && len(item.Keys) > 1 + if !isNestedJSON && (k.Token.JSON || k.Token.Type == token.IDENT) { + fn := k.Token.Value().(string) + sl := unusedNodeKeys[fn] + unusedNodeKeys[fn] = append(sl, k.Token.Pos) + } + } + } + + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeyPositions": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used keys + usedKeys[fieldName] = struct{}{} + unusedNodeKeys = removeCaseFold(unusedNodeKeys, fieldName) + + // Create the field name and decode. We range over the elements + // because we actually want the value. 
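// (Illustrative, based on the Filter/Children/Elem semantics in the ast
// package: with input `service "web" { port = 80 }`, filtering on the field
// name "service" yields a prefix match whose remaining key is "web", while a
// bare `service { port = 80 }` block shows up as an exact match in
// matches.Items.)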
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + if len(unusedNodeKeys) > 0 { + // like decodedFields, populated the unusedKeys field(s) + for _, v := range unusedKeysVal { + v.Set(reflect.ValueOf(unusedNodeKeys)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} + +func removeCaseFold(xs map[string][]token.Pos, y string) map[string][]token.Pos { + var toDel []string + + for i := range xs { + if strings.EqualFold(i, y) { + toDel = append(toDel, i) + } + } + for _, i := range toDel { + delete(xs, i) + } + return xs +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 00000000000..575a20b50b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 00000000000..f5b6b93b94b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,226 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +var unknownPos token.Pos + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. 
+// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // If an Object has no members, it won't have a first item + // to use as position + if len(o.Items) == 0 { + return unknownPos + } + // Return the uninitialized position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // If a parsed object has no keys, there is no position + // for its first element. + if len(o.Keys) == 0 { + return unknownPos + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. 
Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 00000000000..ba07ad42b02 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 00000000000..5c99381dfbf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 00000000000..64c83bcfb55 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,532 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. 
+ return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 00000000000..624a18fe3a7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,652 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == utf8.RuneError && size == 1 { + s.err("illegal UTF-8 encoding") + return ch + } + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + if ch == '\x00' { + s.err("unexpected null character (0x00)") + return eof + } + + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // 
ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start && ch != eof { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. 
+func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 00000000000..5f981eaa2f0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. 
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return 
+ } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 00000000000..59c1bb72d4a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 00000000000..e37c0664ecd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! 
Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 00000000000..125a5f07298 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
+func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
+func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 00000000000..fe3f0f09502 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 00000000000..59c1bb72d4a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 00000000000..95a0c3eee65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 00000000000..d9993c2928a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 00000000000..1fca53c4cee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/vault/api/LICENSE b/vendor/github.com/hashicorp/vault/api/LICENSE new file mode 100644 index 00000000000..f4f97ee5853 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/api/README.md b/vendor/github.com/hashicorp/vault/api/README.md new file mode 100644 index 00000000000..7230ce779fe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/README.md @@ -0,0 +1,9 @@ +Vault API +================= + +This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. + +For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. +For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs). + +[![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go new file mode 100644 index 00000000000..ab38acfbaec --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -0,0 +1,112 @@ +package api + +import ( + "context" + "fmt" +) + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +type AuthMethod interface { + Login(ctx context.Context, client *Client) (*Secret, error) +} + +// Auth is used to return the client for credential-backend API calls. +func (c *Client) Auth() *Auth { + return &Auth{c: c} +} + +// Login sets up the required request body for login requests to the given auth +// method's /login API endpoint, and then performs a write to it. After a +// successful login, this method will automatically set the client's token to +// the login response's ClientToken as well. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. +func (a *Auth) Login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + if authMethod == nil { + return nil, fmt.Errorf("no auth method provided for login") + } + return a.login(ctx, authMethod) +} + +// MFALogin is a wrapper that helps satisfy Vault's MFA implementation. +// If optional credentials are provided a single-phase login will be attempted +// and the resulting Secret will contain a ClientToken if the authentication is successful. +// The client's token will also be set accordingly. 
+// +// If no credentials are provided a two-phase MFA login will be assumed and the resulting +// Secret will have a MFARequirement containing the MFARequestID to be used in a follow-up +// call to `sys/mfa/validate` or by passing it to the method (*Auth).MFAValidate. +func (a *Auth) MFALogin(ctx context.Context, authMethod AuthMethod, creds ...string) (*Secret, error) { + if len(creds) > 0 { + a.c.SetMFACreds(creds) + return a.login(ctx, authMethod) + } + + return a.twoPhaseMFALogin(ctx, authMethod) +} + +// MFAValidate validates an MFA request using the appropriate payload and a secret containing +// Auth.MFARequirement, like the one returned by MFALogin when credentials are not provided. +// Upon successful validation the client token will be set accordingly. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. +func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[string]interface{}) (*Secret, error) { + if mfaSecret == nil || mfaSecret.Auth == nil || mfaSecret.Auth.MFARequirement == nil { + return nil, fmt.Errorf("secret does not contain MFARequirements") + } + + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload) + if err != nil { + return nil, err + } + + return a.checkAndSetToken(s) +} + +// login performs the (*AuthMethod).Login() with the configured client and checks that a ClientToken is returned +func (a *Auth) login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in to auth method: %w", err) + } + + return a.checkAndSetToken(s) +} + +// twoPhaseMFALogin performs the (*AuthMethod).Login() with the configured client +// and checks that an MFARequirement is returned +func (a *Auth) twoPhaseMFALogin(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in: %w", err) + } + if s == nil || s.Auth == nil || s.Auth.MFARequirement == nil { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain MFARequirements") + } + return s, fmt.Errorf("assumed two-phase MFA login, returned secret is missing MFARequirements") + } + + return s, nil +} + +func (a *Auth) checkAndSetToken(s *Secret) (*Secret, error) { + if s == nil || s.Auth == nil || s.Auth.ClientToken == "" { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain ClientToken") + } + return s, fmt.Errorf("response did not return ClientToken, client token not set") + } + + a.c.SetToken(s.Auth.ClientToken) + + return s, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go new file mode 100644 index 00000000000..52be1e7852b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -0,0 +1,374 @@ +package api + +import ( + "context" + "net/http" +) + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateWithContext(ctx context.Context, 
opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateOrphanWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateOrphanWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create-orphan") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + return c.CreateWithRoleWithContext(context.Background(), opts, roleName) +} + +func (c *TokenAuth) CreateWithRoleWithContext(ctx context.Context, opts *TokenCreateRequest, roleName string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + return c.LookupWithContext(context.Background(), token) +} + +func (c *TokenAuth) LookupWithContext(ctx context.Context, token string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + return c.LookupAccessorWithContext(context.Background(), accessor) +} + +func (c *TokenAuth) LookupAccessorWithContext(ctx context.Context, accessor string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + return c.LookupSelfWithContext(context.Background()) +} + +func (c *TokenAuth) LookupSelfWithContext(ctx context.Context) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/auth/token/lookup-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewAccessor(accessor string, 
increment int) (*Secret, error) { + return c.RenewAccessorWithContext(context.Background(), accessor, increment) +} + +func (c *TokenAuth) RenewAccessorWithContext(ctx context.Context, accessor string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/renew-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), token, increment) +} + +func (c *TokenAuth) RenewWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + return c.RenewSelfWithContext(context.Background(), increment) +} + +func (c *TokenAuth) RenewSelfWithContext(ctx context.Context, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RenewTokenAsSelf wraps RenewTokenAsSelfWithContext using context.Background. +func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + return c.RenewTokenAsSelfWithContext(context.Background(), token, increment) +} + +// RenewTokenAsSelfWithContext behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelfWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor wraps RevokeAccessorWithContext using context.Background. +func (c *TokenAuth) RevokeAccessor(accessor string) error { + return c.RevokeAccessorWithContext(context.Background(), accessor) +} + +// RevokeAccessorWithContext revokes a token associated with the given accessor +// along with all the child tokens. 
+func (c *TokenAuth) RevokeAccessorWithContext(ctx context.Context, accessor string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/revoke-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan wraps RevokeOrphanWithContext using context.Background. +func (c *TokenAuth) RevokeOrphan(token string) error { + return c.RevokeOrphanWithContext(context.Background(), token) +} + +// RevokeOrphanWithContext revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphanWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-orphan") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf wraps RevokeSelfWithContext using context.Background. +func (c *TokenAuth) RevokeSelf(token string) error { + return c.RevokeSelfWithContext(context.Background(), token) +} + +// RevokeSelfWithContext revokes the token making the call. The `token` parameter is kept +// for backwards compatibility but is ignored; only the client's set token has +// an effect. +func (c *TokenAuth) RevokeSelfWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree wraps RevokeTreeWithContext using context.Background. +func (c *TokenAuth) RevokeTree(token string) error { + return c.RevokeTreeWithContext(context.Background(), token) +} + +// RevokeTreeWithContext is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. +func (c *TokenAuth) RevokeTreeWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// TokenCreateRequest is the options structure for creating a token. 
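// Illustrative sketch (editorial addition, not part of the upstream vendored
// file): assuming this package is imported as api and an authenticated
// *api.Client named client already exists, a short-lived child token could be
// requested roughly like this; the policy name and TTL are hypothetical.
//
//	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
//		Policies: []string{"default"}, // hypothetical policy
//		TTL:      "1h",
//	})
//	if err != nil {
//		// handle error
//	}
//	newToken := secret.Auth.ClientToken // token string of the new child token
//	_ = newToken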
+type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + Period string `json:"period,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` + Type string `json:"type"` + EntityAlias string `json:"entity_alias"` +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go new file mode 100644 index 00000000000..ef8a74f1a48 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -0,0 +1,1814 @@ +package api + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "golang.org/x/net/http2" + "golang.org/x/time/rate" +) + +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCACertBytes = "VAULT_CACERT_BYTES" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" + EnvHTTPProxy = "VAULT_HTTP_PROXY" + EnvVaultProxyAddr = "VAULT_PROXY_ADDR" + EnvVaultDisableRedirects = "VAULT_DISABLE_REDIRECTS" + HeaderIndex = "X-Vault-Index" + HeaderForward = "X-Vault-Forward" + HeaderInconsistent = "X-Vault-Inconsistent" + + // NamespaceHeaderName is the header set to specify which namespace the + // request is indented for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" + + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection. + RequestHeaderName = "X-Vault-Request" + + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + "but the client is configured to use TLS. Please either enable TLS\n" + + "on the server or run the client with -address set to an address\n" + + "that uses the http protocol:\n\n" + + " vault -address http://
<address>\n\n" + + "You can also set the VAULT_ADDR environment variable:\n\n\n" + + " VAULT_ADDR=http://<address> vault <command>\n\n" + + "where <address>
is replaced by the actual address to the server." +) + +// Deprecated values +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +) + +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. Response wrapping will only be used when the return +// value is not the empty string. +type WrappingLookupFunc func(operation, path string) string + +// Config is used to configure the creation of the client. +type Config struct { + modifyLock sync.RWMutex + + // Address is the address of the Vault server. This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // AgentAddress is the address of the local Vault agent. This should be a + // complete URL such as "http://vault.example.com". + AgentAddress string + + // HttpClient is the HTTP client to use. Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). + HttpClient *http.Client + + // MinRetryWait controls the minimum time to wait before retrying when a 5xx + // error occurs. Defaults to 1000 milliseconds. + MinRetryWait time.Duration + + // MaxRetryWait controls the maximum time to wait before retrying when a 5xx + // error occurs. Defaults to 1500 milliseconds. + MaxRetryWait time.Duration + + // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). + MaxRetries int + + // Timeout, given a non-negative value, will apply the request timeout + // to each request function unless an earlier deadline is passed to the + // request function through context.Context. Note that this timeout is + // not applicable to Logical().ReadRaw* (raw response) functions. + // Defaults to 60 seconds. + Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error + Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // The CheckRetry function to use; a default is used if not provided + CheckRetry retryablehttp.CheckRetry + + // Logger is the leveled logger to provide to the retryable HTTP client. + Logger retryablehttp.LeveledLogger + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent blocking all events. + Limiter *rate.Limiter + + // OutputCurlString causes the actual request to return an error of type + // *OutputStringError. Type asserting the error message will allow + // fetching a cURL-compatible string for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. 
+ OutputCurlString bool + + // OutputPolicy causes the actual request to return an error of type + // *OutputPolicyError. Type asserting the error message will display + // an example of the required policy HCL needed for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputPolicy bool + + // curlCACert, curlCAPath, curlClientCert and curlClientKey are used to keep + // track of the name of the TLS certs and keys when OutputCurlString is set. + // Cloning a client will also not clone those values. + curlCACert, curlCAPath string + curlClientCert, curlClientKey string + + // SRVLookup enables the client to lookup the host through DNS SRV lookup + SRVLookup bool + + // CloneHeaders ensures that the source client's headers are copied to + // its clone. + CloneHeaders bool + + // CloneToken from parent. + CloneToken bool + + // ReadYourWrites ensures isolated read-after-write semantics by + // providing discovered cluster replication states in each request. + // The shared state is automatically propagated to all Client clones. + // + // Note: Careful consideration should be made prior to enabling this setting + // since there will be a performance penalty paid upon each request. + // This feature requires Enterprise server-side. + ReadYourWrites bool + + // DisableRedirects when set to true, will prevent the client from + // automatically following a (single) redirect response to its initial + // request. This behavior may be desirable if using Vault CLI on the server + // side. + // + // Note: Disabling redirect following behavior could cause issues with + // commands such as 'vault operator raft snapshot' as this redirects to the + // primary node. + DisableRedirects bool +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Vault. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Vault server SSL certificate. It takes precedence over CACertBytes + // and CAPath. + CACert string + + // CACertBytes is a PEM-encoded certificate or bundle. It takes precedence + // over CAPath. + CACertBytes []byte + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Vault server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Vault communication + ClientCert string + + // ClientKey is the path to the private key for Vault communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +// DefaultConfig returns a default configuration for the client. It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, the Error field on the returned *Config will be populated with the specific error. 
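// Illustrative sketch (editorial addition, not part of the upstream vendored
// file): assuming this package is imported as api, client-side TLS can be set
// up by combining DefaultConfig with ConfigureTLS roughly as below; the
// certificate paths are hypothetical.
//
//	cfg := api.DefaultConfig()
//	err := cfg.ConfigureTLS(&api.TLSConfig{
//		CACert:     "/etc/ssl/vault-ca.pem",     // hypothetical CA bundle
//		ClientCert: "/etc/ssl/vault-client.pem", // hypothetical client cert
//		ClientKey:  "/etc/ssl/vault-client.key", // hypothetical client key
//	})
//	if err != nil {
//		// handle error
//	}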
+func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + HttpClient: cleanhttp.DefaultPooledClient(), + Timeout: time.Second * 60, + MinRetryWait: time.Millisecond * 1000, + MaxRetryWait: time.Millisecond * 1500, + MaxRetries: 2, + Backoff: retryablehttp.LinearJitterBackoff, + } + + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err + return config + } + + if err := config.ReadEnvironment(); err != nil { + config.Error = err + return config + } + + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + + return config +} + +// configureTLS is a lock free version of ConfigureTLS that can be used in +// ReadEnvironment where the lock is already hold +func (c *Config) configureTLS(t *TLSConfig) error { + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + var clientCert tls.Certificate + foundClientCert := false + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + c.curlClientCert = t.ClientCert + c.curlClientKey = t.ClientKey + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("both client cert and client key must be provided") + } + + if t.CACert != "" || len(t.CACertBytes) != 0 || t.CAPath != "" { + c.curlCACert = t.CACert + c.curlCAPath = t.CAPath + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CACertificate: t.CACertBytes, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + } + + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true + } + + if foundClientCert { + // We use this function to ignore the server's preferential list of + // CAs, otherwise any CA used for the cert auth backend must be in the + // server's CA pool + clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCert, nil + } + } + + if t.TLSServerName != "" { + clientTLSConfig.ServerName = t.TLSServerName + } + + return nil +} + +// ConfigureTLS takes a set of TLS configurations and applies those to the +// HTTP client. +func (c *Config) ConfigureTLS(t *TLSConfig) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + return c.configureTLS(t) +} + +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. 
+func (c *Config) ReadEnvironment() error { + var envAddress string + var envAgentAddress string + var envCACert string + var envCACertBytes []byte + var envCAPath string + var envClientCert string + var envClientKey string + var envClientTimeout time.Duration + var envInsecure bool + var envTLSServerName string + var envMaxRetries *uint64 + var envSRVLookup bool + var limit *rate.Limiter + var envVaultProxy string + var envVaultDisableRedirects bool + + // Parse the environment variables + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultAgentAddr); v != "" { + envAgentAddress = v + } + if v := os.Getenv(EnvVaultMaxRetries); v != "" { + maxRetries, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + envMaxRetries = &maxRetries + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCACertBytes); v != "" { + envCACertBytes = []byte(v) + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } + if v := os.Getenv(EnvVaultSkipVerify); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSkipVerify) + } + } + if v := os.Getenv(EnvVaultSRVLookup); v != "" { + var err error + envSRVLookup, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSRVLookup) + } + } + + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + + if v := os.Getenv(EnvHTTPProxy); v != "" { + envVaultProxy = v + } + + // VAULT_PROXY_ADDR supersedes VAULT_HTTP_PROXY + if v := os.Getenv(EnvVaultProxyAddr); v != "" { + envVaultProxy = v + } + + if v := os.Getenv(EnvVaultDisableRedirects); v != "" { + var err error + envVaultDisableRedirects, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultDisableRedirects) + } + + c.DisableRedirects = envVaultDisableRedirects + } + + // Configure the HTTP clients TLS configuration. 
+ t := &TLSConfig{ + CACert: envCACert, + CACertBytes: envCACertBytes, + CAPath: envCAPath, + ClientCert: envClientCert, + ClientKey: envClientKey, + TLSServerName: envTLSServerName, + Insecure: envInsecure, + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.SRVLookup = envSRVLookup + c.Limiter = limit + + if err := c.configureTLS(t); err != nil { + return err + } + + if envAddress != "" { + c.Address = envAddress + } + + if envAgentAddress != "" { + c.AgentAddress = envAgentAddress + } + + if envMaxRetries != nil { + c.MaxRetries = int(*envMaxRetries) + } + + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + + if envVaultProxy != "" { + u, err := url.Parse(envVaultProxy) + if err != nil { + return err + } + + transport := c.HttpClient.Transport.(*http.Transport) + transport.Proxy = http.ProxyURL(u) + } + + return nil +} + +// ParseAddress transforms the provided address into a url.URL and handles +// the case of Unix domain sockets by setting the DialContext in the +// configuration's HttpClient.Transport. This function must be called with +// c.modifyLock held for write access. +func (c *Config) ParseAddress(address string) (*url.URL, error) { + u, err := url.Parse(address) + if err != nil { + return nil, err + } + + c.Address = address + + if strings.HasPrefix(address, "unix://") { + // When the address begins with unix://, always change the transport's + // DialContext (to match previous behaviour) + socket := strings.TrimPrefix(address, "unix://") + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. + u.Scheme = "http" + u.Host = socket + u.Path = "" + } else { + return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") + } + } else if strings.HasPrefix(c.Address, "unix://") { + // When the address being set does not begin with unix:// but the previous + // address in the Config did, change the transport's DialContext back to + // use the default configuration that cleanhttp uses. + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = cleanhttp.DefaultPooledTransport().DialContext + } + } + + return u, nil +} + +func parseRateLimit(val string) (rate float64, burst int, err error) { + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err +} + +// Client is the client to the Vault API. Create a client with NewClient. +type Client struct { + modifyLock sync.RWMutex + addr *url.URL + config *Config + token string + headers http.Header + wrappingLookupFunc WrappingLookupFunc + mfaCreds []string + policyOverride bool + requestCallbacks []RequestCallback + responseCallbacks []ResponseCallback + replicationStateStore *replicationStateStore +} + +// NewClient returns a new client for the given configuration. +// +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. 
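// Illustrative sketch (editorial addition, not part of the upstream vendored
// file): assuming this package is imported as api, typical client construction
// looks roughly like the following; the address and token values are
// hypothetical.
//
//	cfg := api.DefaultConfig()
//	cfg.Address = "https://vault.example.com:8200" // hypothetical address
//	client, err := api.NewClient(cfg)
//	if err != nil {
//		// handle error
//	}
//	client.SetToken("s.examplereplacetoken") // hypothetical token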
+// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. +func NewClient(c *Config) (*Client, error) { + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") + } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } + + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.MinRetryWait == 0 { + c.MinRetryWait = def.MinRetryWait + } + + if c.MaxRetryWait == 0 { + c.MaxRetryWait = def.MaxRetryWait + } + + if c.HttpClient == nil { + c.HttpClient = def.HttpClient + } + if c.HttpClient.Transport == nil { + c.HttpClient.Transport = def.HttpClient.Transport + } + + address := c.Address + if c.AgentAddress != "" { + address = c.AgentAddress + } + + u, err := c.ParseAddress(address) + if err != nil { + return nil, err + } + + client := &Client{ + addr: u, + config: c, + headers: make(http.Header), + } + + if c.ReadYourWrites { + client.replicationStateStore = &replicationStateStore{} + } + + // Add the VaultRequest SSRF protection header + client.headers[RequestHeaderName] = []string{"true"} + + if token := os.Getenv(EnvVaultToken); token != "" { + client.token = token + } + + if namespace := os.Getenv(EnvVaultNamespace); namespace != "" { + client.setNamespace(namespace) + } + + return client, nil +} + +func (c *Client) CloneConfig() *Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + newConfig := DefaultConfig() + newConfig.Address = c.config.Address + newConfig.AgentAddress = c.config.AgentAddress + newConfig.MinRetryWait = c.config.MinRetryWait + newConfig.MaxRetryWait = c.config.MaxRetryWait + newConfig.MaxRetries = c.config.MaxRetries + newConfig.Timeout = c.config.Timeout + newConfig.Backoff = c.config.Backoff + newConfig.CheckRetry = c.config.CheckRetry + newConfig.Logger = c.config.Logger + newConfig.Limiter = c.config.Limiter + newConfig.SRVLookup = c.config.SRVLookup + newConfig.CloneHeaders = c.config.CloneHeaders + newConfig.CloneToken = c.config.CloneToken + newConfig.ReadYourWrites = c.config.ReadYourWrites + + // we specifically want a _copy_ of the client here, not a pointer to the original one + newClient := *c.config.HttpClient + newConfig.HttpClient = &newClient + + return newConfig +} + +// SetAddress sets the address of Vault in the client. The format of address should be +// "://:". Setting this on a client will override the +// value of VAULT_ADDR environment variable. +func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + parsedAddr, err := c.config.ParseAddress(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) + } + + c.addr = parsedAddr + return nil +} + +// Address returns the Vault URL the client is configured to connect to +func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.addr.String() +} + +func (c *Client) SetCheckRedirect(f func(*http.Request, []*http.Request) error) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.CheckRedirect = f +} + +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. 
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + +func (c *Client) Limiter() *rate.Limiter { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Limiter +} + +// SetMinRetryWait sets the minimum time to wait before retrying in the case of certain errors. +func (c *Client) SetMinRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MinRetryWait = retryWait +} + +func (c *Client) MinRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MinRetryWait +} + +// SetMaxRetryWait sets the maximum time to wait before retrying in the case of certain errors. +func (c *Client) SetMaxRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetryWait = retryWait +} + +func (c *Client) MaxRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetryWait +} + +// SetMaxRetries sets the number of retries that will be used in the case of certain errors +func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetries = retries +} + +func (c *Client) SetMaxIdleConnections(idle int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns = idle +} + +func (c *Client) MaxIdleConnections() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + return c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns +} + +func (c *Client) SetDisableKeepAlives(disable bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives = disable +} + +func (c *Client) DisableKeepAlives() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives +} + +func (c *Client) MaxRetries() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetries +} + +func (c *Client) SetSRVLookup(srv bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.SRVLookup = srv +} + +func (c *Client) SRVLookup() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.SRVLookup +} + +// SetCheckRetry sets the CheckRetry function to be 
used for future requests. +func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CheckRetry = checkRetry +} + +func (c *Client) CheckRetry() retryablehttp.CheckRetry { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CheckRetry +} + +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Timeout = timeout +} + +func (c *Client) ClientTimeout() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Timeout +} + +func (c *Client) OutputCurlString() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputCurlString +} + +func (c *Client) SetOutputCurlString(curl bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputCurlString = curl +} + +func (c *Client) OutputPolicy() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputPolicy +} + +func (c *Client) SetOutputPolicy(isSet bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputPolicy = isSet +} + +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.wrappingLookupFunc +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.wrappingLookupFunc = lookupFunc +} + +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. +func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.mfaCreds = creds +} + +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.setNamespace(namespace) +} + +func (c *Client) setNamespace(namespace string) { + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(NamespaceHeaderName, namespace) +} + +// ClearNamespace removes the namespace header if set. +func (c *Client) ClearNamespace() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers != nil { + c.headers.Del(NamespaceHeaderName) + } +} + +// Namespace returns the namespace currently set in this client. It will +// return an empty string if there is no namespace set. 
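// Illustrative sketch (editorial addition, not part of the upstream vendored
// file): namespace handling on an existing *api.Client named client; the
// namespace names below are hypothetical.
//
//	client.SetNamespace("team-a")               // subsequent requests target team-a
//	nsClient := client.WithNamespace("team-b")  // shallow copy scoped to team-b
//	_ = nsClient
//	client.ClearNamespace()                     // remove the namespace header again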
+func (c *Client) Namespace() string { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers == nil { + return "" + } + return c.headers.Get(NamespaceHeaderName) +} + +// WithNamespace makes a shallow copy of Client, modifies it to use +// the given namespace, and returns it. Passing an empty string will +// temporarily unset the namespace. +func (c *Client) WithNamespace(namespace string) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.headers = c.Headers() + if namespace == "" { + c2.ClearNamespace() + } else { + c2.SetNamespace(namespace) + } + return &c2 +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = "" +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Backoff = backoff +} + +func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Logger = logger +} + +// SetCloneHeaders to allow headers to be copied whenever the client is cloned. +func (c *Client) SetCloneHeaders(cloneHeaders bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneHeaders = cloneHeaders +} + +// CloneHeaders gets the configured CloneHeaders value. +func (c *Client) CloneHeaders() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneHeaders +} + +// SetCloneToken from parent +func (c *Client) SetCloneToken(cloneToken bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneToken = cloneToken +} + +// CloneToken gets the configured CloneToken value. 
+func (c *Client) CloneToken() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneToken +} + +// SetReadYourWrites to prevent reading stale cluster replication state. +func (c *Client) SetReadYourWrites(preventStaleReads bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + if preventStaleReads { + if c.replicationStateStore == nil { + c.replicationStateStore = &replicationStateStore{} + } + } else { + c.replicationStateStore = nil + } + + c.config.ReadYourWrites = preventStaleReads +} + +// ReadYourWrites gets the configured value of ReadYourWrites +func (c *Client) ReadYourWrites() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.ReadYourWrites +} + +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. The headers are cloned based on the CloneHeaders property of the +// source config +// +// Also, only the client's config is currently copied; this means items not in +// the api.Config struct, such as policy override and wrapping function +// behavior, must currently then be set as desired on the new client. +func (c *Client) Clone() (*Client, error) { + return c.clone(c.config.CloneHeaders) +} + +// CloneWithHeaders creates a new client similar to Clone, with the difference +// being that the headers are always cloned +func (c *Client) CloneWithHeaders() (*Client, error) { + return c.clone(true) +} + +// clone creates a new client, with the headers being cloned based on the +// passed in cloneheaders boolean +func (c *Client) clone(cloneHeaders bool) (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + config := c.config + config.modifyLock.RLock() + defer config.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MinRetryWait: config.MinRetryWait, + MaxRetryWait: config.MaxRetryWait, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + CheckRetry: config.CheckRetry, + Logger: config.Logger, + Limiter: config.Limiter, + AgentAddress: config.AgentAddress, + SRVLookup: config.SRVLookup, + CloneHeaders: config.CloneHeaders, + CloneToken: config.CloneToken, + ReadYourWrites: config.ReadYourWrites, + } + client, err := NewClient(newConfig) + if err != nil { + return nil, err + } + + if cloneHeaders { + client.SetHeaders(c.Headers().Clone()) + } + + if config.CloneToken { + client.SetToken(c.token) + } + + client.replicationStateStore = c.replicationStateStore + + return client, nil +} + +// SetPolicyOverride sets whether requests should be sent with the policy +// override flag to request overriding soft-mandatory Sentinel policies (both +// RGPs and EGPs) +func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.policyOverride = override +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. 
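Since Clone copies only the Config fields, per-client state such as custom headers and the token travel to the new client only when CloneHeaders and CloneToken are enabled first. A small illustrative sketch (the token and header values are placeholders):

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	parent, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	parent.SetToken("hvs.placeholder-token")      // placeholder token
	parent.AddHeader("X-Custom-Header", "audit-1") // placeholder header

	// Opt in to copying headers and the token on Clone.
	parent.SetCloneHeaders(true)
	parent.SetCloneToken(true)

	child, err := parent.Clone()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token copied:", child.Token() == parent.Token())
	log.Println("header copied:", child.Headers().Get("X-Custom-Header"))
}
```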
+func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + policyOverride := c.policyOverride + c.modifyLock.RUnlock() + + host := addr.Host + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + // Internet Draft specifies that the SRV record is ignored if a port is given + if addr.Port() == "" && c.config.SRVLookup { + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + + req := &Request{ + Method: method, + URL: &url.URL{ + User: addr.User, + Scheme: addr.Scheme, + Host: host, + Path: path.Join(addr.Path, requestPath), + }, + Host: addr.Host, + ClientToken: token, + Params: make(map[string][]string), + } + + var lookupPath string + switch { + case strings.HasPrefix(requestPath, "/v1/"): + lookupPath = strings.TrimPrefix(requestPath, "/v1/") + case strings.HasPrefix(requestPath, "v1/"): + lookupPath = strings.TrimPrefix(requestPath, "v1/") + default: + lookupPath = requestPath + } + + req.MFAHeaderVals = mfaCreds + + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) + } else { + req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) + } + + req.Headers = c.Headers() + req.PolicyOverride = policyOverride + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. +func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. +func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + // Note: we purposefully do not call cancel manually. The reason is + // when canceled, the request.Body will EOF when reading due to the way + // it streams data in. Cancel will still be run when the timeout is + // hit, so this doesn't really harm anything. 
+ ctx, _ = c.withConfiguredTimeout(ctx) + return c.rawRequestWithContext(ctx, r) +} + +func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + minRetryWait := c.config.MinRetryWait + maxRetryWait := c.config.MaxRetryWait + maxRetries := c.config.MaxRetries + checkRetry := c.config.CheckRetry + backoff := c.config.Backoff + httpClient := c.config.HttpClient + ns := c.headers.Get(NamespaceHeaderName) + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + logger := c.config.Logger + disableRedirects := c.config.DisableRedirects + c.config.modifyLock.RUnlock() + + c.modifyLock.RUnlock() + + // ensure that the most current namespace setting is used at the time of the call + // e.g. calls using (*Client).WithNamespace + switch ns { + case "": + r.Headers.Del(NamespaceHeaderName) + default: + r.Headers.Set(NamespaceHeaderName, ns) + } + + for _, cb := range c.requestCallbacks { + cb(r) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.requireState(r) + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + redirectCount := 0 +START: + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + if req == nil { + return nil, fmt.Errorf("nil request created") + } + + if outputCurlString { + LastOutputStringError = &OutputStringError{ + Request: req, + TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, + ClientCert: c.config.curlClientCert, + ClientKey: c.config.curlClientKey, + ClientCACert: c.config.curlCACert, + ClientCAPath: c.config.curlCAPath, + } + return nil, LastOutputStringError + } + + if outputPolicy { + LastOutputPolicyError = &OutputPolicyError{ + method: req.Method, + path: strings.TrimPrefix(req.URL.Path, "/v1"), + } + return nil, LastOutputPolicyError + } + + req.Request = req.Request.WithContext(ctx) + + if backoff == nil { + backoff = retryablehttp.LinearJitterBackoff + } + + if checkRetry == nil { + checkRetry = DefaultRetryPolicy + } + + client := &retryablehttp.Client{ + HTTPClient: httpClient, + RetryWaitMin: minRetryWait, + RetryWaitMax: maxRetryWait, + RetryMax: maxRetries, + Backoff: backoff, + CheckRetry: checkRetry, + Logger: logger, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + } + + var result *Response + resp, err := client.Do(req) + if resp != nil { + result = &Response{Response: resp} + } + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect (if redirects aren't disabled) + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto START + } + + if result != nil 
{ + for _, cb := range c.responseCallbacks { + cb(result) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.recordState(result) + } + } + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} + +// httpRequestWithContext avoids the use of the go-retryable library found in RawRequestWithContext and is +// useful when making calls where a net/http client is desirable. A single redirect (status code 301, 302, +// or 307) will be followed but all retry and timeout logic is the responsibility of the caller as is +// closing the Response body. +func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + req, err := http.NewRequestWithContext(ctx, r.Method, r.URL.RequestURI(), r.Body) + if err != nil { + return nil, err + } + + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + httpClient := c.config.HttpClient + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + disableRedirects := c.config.DisableRedirects + + // add headers + if c.headers != nil { + for header, vals := range c.headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + // explicitly set the namespace header to current client + if ns := c.headers.Get(NamespaceHeaderName); ns != "" { + r.Headers.Set(NamespaceHeaderName, ns) + } + } + + c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + + // OutputCurlString and OutputPolicy logic rely on the request type to be retryable.Request + if outputCurlString { + return nil, fmt.Errorf("output-curl-string is not implemented for this request") + } + if outputPolicy { + return nil, fmt.Errorf("output-policy is not implemented for this request") + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if len(r.ClientToken) != 0 { + req.Header.Set(AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + var result *Response + + resp, err := httpClient.Do(req) + + if resp != nil { + result = &Response{Response: resp} + } + + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect, if redirects aren't disabled + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Retry the request + resp, err = httpClient.Do(req) + if err != nil { + return result, 
fmt.Errorf("redirect failed: %s", err) + } + } + + if err := result.Error(); err != nil { + return nil, err + } + + return result, nil +} + +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) + +// WithRequestCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every outgoing request. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithRequestCallbacks(callbacks ...RequestCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.requestCallbacks = callbacks + return &c2 +} + +// WithResponseCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every received response. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.responseCallbacks = callbacks + return &c2 +} + +// withConfiguredTimeout wraps the context with a timeout from the client configuration. +func (c *Client) withConfiguredTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + timeout := c.ClientTimeout() + + if timeout > 0 { + return context.WithTimeout(ctx, timeout) + } + + return ctx, func() {} +} + +// RecordState returns a response callback that will record the state returned +// by Vault in a response header. +func RecordState(state *string) ResponseCallback { + return func(resp *Response) { + *state = resp.Header.Get(HeaderIndex) + } +} + +// RequireState returns a request callback that will add a request header to +// specify the state we require of Vault. This state was obtained from a +// response header seen previous, probably captured with RecordState. +func RequireState(states ...string) RequestCallback { + return func(req *Request) { + for _, s := range states { + req.Headers.Add(HeaderIndex, s) + } + } +} + +// compareReplicationStates returns 1 if s1 is newer or identical, -1 if s1 is older, and 0 +// if neither s1 or s2 is strictly greater. An error is returned if s1 or s2 +// are invalid or from different clusters. +func compareReplicationStates(s1, s2 string) (int, error) { + w1, err := ParseReplicationState(s1, nil) + if err != nil { + return 0, err + } + w2, err := ParseReplicationState(s2, nil) + if err != nil { + return 0, err + } + + if w1.ClusterID != w2.ClusterID { + return 0, fmt.Errorf("can't compare replication states with different ClusterIDs") + } + + switch { + case w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex: + return 1, nil + // We've already handled the case where both are equal above, so really we're + // asking here if one or both are lesser. + case w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex: + return -1, nil + } + + return 0, nil +} + +// MergeReplicationStates returns a merged array of replication states by iterating +// through all states in `old`. 
An iterated state is merged to the result before `new` +// based on the result of compareReplicationStates +func MergeReplicationStates(old []string, new string) []string { + if len(old) == 0 || len(old) > 2 { + return []string{new} + } + + var ret []string + for _, o := range old { + c, err := compareReplicationStates(o, new) + if err != nil { + return []string{new} + } + switch c { + case 1: + ret = append(ret, o) + case -1: + ret = append(ret, new) + case 0: + ret = append(ret, o, new) + } + } + return strutil.RemoveDuplicates(ret, false) +} + +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { + cooked, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + s := string(cooked) + + lastIndex := strings.LastIndexByte(s, ':') + if lastIndex == -1 { + return nil, fmt.Errorf("invalid full state header format") + } + state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:] + stateHMAC, err := hex.DecodeString(stateHMACRaw) + if err != nil { + return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err) + } + + if len(hmacKey) != 0 { + hm := hmac.New(sha256.New, hmacKey) + hm.Write([]byte(state)) + if !hmac.Equal(hm.Sum(nil), stateHMAC) { + return nil, fmt.Errorf("invalid state header HMAC (mismatch)") + } + } + + pieces := strings.Split(state, ":") + if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" { + return nil, fmt.Errorf("invalid state header format") + } + localIndex, err := strconv.ParseUint(pieces[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid local index in state header: %w", err) + } + replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid replicated index in state header: %w", err) + } + + return &WALState{ + ClusterID: pieces[1], + LocalIndex: localIndex, + ReplicatedIndex: replicatedIndex, + }, nil +} + +// ForwardInconsistent returns a request callback that will add a request +// header which says: if the state required isn't present on the node receiving +// this request, forward it to the active node. This should be used in +// conjunction with RequireState. +func ForwardInconsistent() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderInconsistent, "forward-active-node") + } +} + +// ForwardAlways returns a request callback which adds a header telling any +// performance standbys handling the request to forward it to the active node. +// This feature must be enabled in Vault's configuration. +func ForwardAlways() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderForward, "active-node") + } +} + +// DefaultRetryPolicy is the default retry policy used by new Client objects. +// It is the same as retryablehttp.DefaultRetryPolicy except that it also retries +// 412 requests, which are returned by Vault when a X-Vault-Index header isn't +// satisfied. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + retry, err := retryablehttp.DefaultRetryPolicy(ctx, resp, err) + if err != nil || retry { + return retry, err + } + if resp != nil && resp.StatusCode == 412 { + return true, nil + } + return false, nil +} + +// replicationStateStore is used to track cluster replication states +// in order to ensure proper read-after-write semantics for a Client. 
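RecordState and RequireState are typically paired through the request/response callback helpers above so that a read can be made at least as fresh as an earlier write when talking to performance standbys. A minimal sketch, with a hypothetical KV v2 path and placeholder data:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Capture the X-Vault-Index state returned by the write ...
	var state string
	_, err = client.WithResponseCallbacks(vault.RecordState(&state)).
		Logical().WriteWithContext(ctx, "secret/data/app", map[string]interface{}{
		"data": map[string]interface{}{"password": "placeholder"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// ... and require at least that state on the read, forwarding to the
	// active node if the receiving standby has not caught up yet.
	_, err = client.WithRequestCallbacks(vault.RequireState(state), vault.ForwardInconsistent()).
		Logical().ReadWithContext(ctx, "secret/data/app")
	if err != nil {
		log.Fatal(err)
	}
}
```

Alternatively, calling SetReadYourWrites(true) on the client makes it track and replay these states automatically via the replicationStateStore described below.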
+type replicationStateStore struct { + m sync.RWMutex + store []string +} + +// recordState updates the store's replication states with the merger of all +// states. +func (w *replicationStateStore) recordState(resp *Response) { + w.m.Lock() + defer w.m.Unlock() + newState := resp.Header.Get(HeaderIndex) + if newState != "" { + w.store = MergeReplicationStates(w.store, newState) + } +} + +// requireState updates the Request with the store's current replication states. +func (w *replicationStateStore) requireState(req *Request) { + w.m.RLock() + defer w.m.RUnlock() + for _, s := range w.store { + req.Headers.Add(HeaderIndex, s) + } +} + +// states currently stored. +func (w *replicationStateStore) states() []string { + w.m.RLock() + defer w.m.RUnlock() + c := make([]string, len(w.store)) + copy(c, w.store) + return c +} + +// validateToken will check for non-printable characters to prevent a call that will fail at the api +func validateToken(t string) error { + idx := strings.IndexFunc(t, func(c rune) bool { + return !unicode.IsPrint(c) + }) + if idx != -1 { + return fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go new file mode 100644 index 00000000000..0988ebcd1fc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -0,0 +1,37 @@ +package api + +import ( + "context" + "fmt" + "net/http" +) + +// Help wraps HelpWithContext using context.Background. +func (c *Client) Help(path string) (*Help, error) { + return c.HelpWithContext(context.Background(), path) +} + +// HelpWithContext reads the help information for the given path. +func (c *Client) HelpWithContext(ctx context.Context, path string) (*Help, error) { + ctx, cancelFunc := c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/%s", path)) + r.Params.Add("help", "1") + + resp, err := c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result Help + err = resp.DecodeJSON(&result) + return &result, err +} + +type Help struct { + Help string `json:"help"` + SeeAlso []string `json:"see_also"` + OpenAPI map[string]interface{} `json:"openapi"` +} diff --git a/vendor/github.com/hashicorp/vault/api/kv.go b/vendor/github.com/hashicorp/vault/api/kv.go new file mode 100644 index 00000000000..37699df266f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv.go @@ -0,0 +1,56 @@ +package api + +import "errors" + +// ErrSecretNotFound is returned by KVv1 and KVv2 wrappers to indicate that the +// secret is missing at the given location. +var ErrSecretNotFound = errors.New("secret not found") + +// A KVSecret is a key-value secret returned by Vault's KV secrets engine, +// and is the most basic type of secret stored in Vault. +// +// Data contains the key-value pairs of the secret itself, +// while Metadata contains a subset of metadata describing +// this particular version of the secret. +// The Metadata field for a KV v1 secret will always be nil, as +// metadata is only supported starting in KV v2. +// +// The Raw field can be inspected for information about the lease, +// and passed to a LifetimeWatcher object for periodic renewal. 
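Help issues a read with the help=1 query parameter against any path and decodes the response into the Help struct above. A brief sketch; the path queried is just an example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask Vault to describe the endpoints under an example path.
	h, err := client.HelpWithContext(context.Background(), "sys/mounts")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h.Help)
	fmt.Println("see also:", h.SeeAlso)
}
```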
+type KVSecret struct { + Data map[string]interface{} + VersionMetadata *KVVersionMetadata + CustomMetadata map[string]interface{} + Raw *Secret +} + +// KVv1 is used to return a client for reads and writes against +// a KV v1 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// While v1 is not necessarily deprecated, Vault development servers tend to +// use v2 as the version of the KV secrets engine, as this is what's mounted +// by default when a server is started in -dev mode. See the kvv2 struct. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv1(mountPath string) *KVv1 { + return &KVv1{c: c, mountPath: mountPath} +} + +// KVv2 is used to return a client for reads and writes against +// a KV v2 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// Vault development servers tend to have "secret" as the mount path, +// as these are the default settings when a server is started in -dev mode. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv2(mountPath string) *KVv2 { + return &KVv2{c: c, mountPath: mountPath} +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v1.go b/vendor/github.com/hashicorp/vault/api/kv_v1.go new file mode 100644 index 00000000000..22ba992384b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v1.go @@ -0,0 +1,57 @@ +package api + +import ( + "context" + "fmt" +) + +type KVv1 struct { + c *Client + mountPath string +} + +// Get returns a secret from the KV v1 secrets engine. +func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + } + + return &KVSecret{ + Data: secret.Data, + VersionMetadata: nil, + Raw: secret, + }, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) into the +// KV v1 secrets engine. +// +// If the secret already exists, it will be overwritten. +func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { + pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data) + if err != nil { + return fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes a secret from the KV v1 secrets engine. 
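Get, Put, and Delete on the KVv1 wrapper all take the secret path relative to the mount, and a missing secret can be detected with errors.Is against ErrSecretNotFound. A sketch under the assumption that a KV v1 engine is mounted at "kv":

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	kv := client.KVv1("kv") // assumed mount path

	if err := kv.Put(ctx, "my-app/creds", map[string]interface{}{
		"password": "Hashi123", // example value from the Put doc comment
	}); err != nil {
		log.Fatal(err)
	}

	secret, err := kv.Get(ctx, "my-app/creds")
	if errors.Is(err, vault.ErrSecretNotFound) {
		log.Fatal("secret is missing")
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["password"])

	_ = kv.Delete(ctx, "my-app/creds")
}
```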
+func (kv *KVv1) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v2.go b/vendor/github.com/hashicorp/vault/api/kv_v2.go new file mode 100644 index 00000000000..335c21001be --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v2.go @@ -0,0 +1,778 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "sort" + "strconv" + "time" + + "github.com/mitchellh/mapstructure" +) + +type KVv2 struct { + c *Client + mountPath string +} + +// KVMetadata is the full metadata for a given KV v2 secret. +type KVMetadata struct { + CASRequired bool `mapstructure:"cas_required"` + CreatedTime time.Time `mapstructure:"created_time"` + CurrentVersion int `mapstructure:"current_version"` + CustomMetadata map[string]interface{} `mapstructure:"custom_metadata"` + DeleteVersionAfter time.Duration `mapstructure:"delete_version_after"` + MaxVersions int `mapstructure:"max_versions"` + OldestVersion int `mapstructure:"oldest_version"` + UpdatedTime time.Time `mapstructure:"updated_time"` + // Keys are stringified ints, e.g. "3". To get a sorted slice of version metadata, use GetVersionsAsList. + Versions map[string]KVVersionMetadata `mapstructure:"versions"` + Raw *Secret +} + +// KVMetadataPutInput is the subset of metadata that can be replaced for a +// KV v2 secret using the PutMetadata method. +// +// All fields should be explicitly provided, as any fields left unset in the +// struct will be reset to their zero value. +type KVMetadataPutInput struct { + CASRequired bool + CustomMetadata map[string]interface{} + DeleteVersionAfter time.Duration + MaxVersions int +} + +// KVMetadataPatchInput is the subset of metadata that can be manually modified for +// a KV v2 secret using the PatchMetadata method. +// +// The struct's fields are all pointers. A pointer to a field's zero +// value (e.g. false for *bool) implies that field should be reset to its +// zero value after update, whereas a field left as a nil pointer +// (e.g. nil for *bool) implies the field should remain unchanged. +// +// Since maps are already pointers, use an empty map to remove all +// custom metadata. +type KVMetadataPatchInput struct { + CASRequired *bool + CustomMetadata map[string]interface{} + DeleteVersionAfter *time.Duration + MaxVersions *int +} + +// KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret. +type KVVersionMetadata struct { + Version int `mapstructure:"version"` + CreatedTime time.Time `mapstructure:"created_time"` + DeletionTime time.Time `mapstructure:"deletion_time"` + Destroyed bool `mapstructure:"destroyed"` +} + +// Currently supported options: WithOption, WithCheckAndSet, WithMethod +type KVOption func() (key string, value interface{}) + +const ( + KVOptionCheckAndSet = "cas" + KVOptionMethod = "method" + KVMergeMethodPatch = "patch" + KVMergeMethodReadWrite = "rw" +) + +// WithOption can optionally be passed to provide generic options for a +// KV request. Valid keys and values depend on the type of request. +func WithOption(key string, value interface{}) KVOption { + return func() (string, interface{}) { + return key, value + } +} + +// WithCheckAndSet can optionally be passed to perform a check-and-set +// operation on a KV request. 
If not set, the write will be allowed. +// If cas is set to 0, a write will only be allowed if the key doesn't exist. +// If set to non-zero, the write will only be allowed if the key’s current +// version matches the version specified in the cas parameter. +func WithCheckAndSet(cas int) KVOption { + return WithOption(KVOptionCheckAndSet, cas) +} + +// WithMergeMethod can optionally be passed to dictate which type of +// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH +// request will be issued. If set to "rw", then a read will be performed, +// then a local update, followed by a remote update. Defaults to "patch". +func WithMergeMethod(method string) KVOption { + return WithOption(KVOptionMethod, method) +} + +// Get returns the latest version of a secret from the KV v2 secrets engine. +// +// If the latest version has been deleted, an error will not be thrown, but +// the Data field on the returned secret will be nil, and the Metadata field +// will contain the deletion time. +func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +// GetVersion returns the data and metadata for a specific version of the +// given secret. +// +// If that version has been deleted, the Data field on the +// returned secret will be nil, and the Metadata field will contain the deletion time. +// +// GetVersionsAsList can provide a list of available versions sorted by +// version number, while the response from GetMetadata contains them as a map. +func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + queryParams := map[string][]string{"version": {strconv.Itoa(version)}} + secret, err := kv.c.Logical().ReadWithDataWithContext(ctx, pathToRead, queryParams) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("%w: for version %d at %s", ErrSecretNotFound, version, pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +// GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number. 
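On the KV v2 wrapper every Put creates a new version, and WithCheckAndSet guards against racing writers. An illustrative sketch assuming a KV v2 engine mounted at "secret" and a placeholder secret path:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	kv := client.KVv2("secret") // assumed mount path

	// cas=0 means: only write if the secret does not exist yet.
	written, err := kv.Put(ctx, "my-app/creds",
		map[string]interface{}{"password": "Hashi123"},
		vault.WithCheckAndSet(0))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created version:", written.VersionMetadata.Version)

	// Fetch that exact version back, rather than whatever is latest.
	v, err := kv.GetVersion(ctx, "my-app/creds", written.VersionMetadata.Version)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Data["password"])
}
```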
+func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret to determine versions: %w", err) + } + + versionsList := make([]KVVersionMetadata, 0, len(md.Versions)) + for _, versionMetadata := range md.Versions { + versionsList = append(versionsList, versionMetadata) + } + + sort.Slice(versionsList, func(i, j int) bool { return versionsList[i].Version < versionsList[j].Version }) + return versionsList, nil +} + +// GetMetadata returns the full metadata for a given secret, including a map of +// its existing versions and their respective creation/deletion times, etc. +func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret: %w", err) + } + + return md, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) +// into the KV v2 secrets engine. +// +// If the secret already exists, a new version will be created +// and the previous version can be accessed with the GetVersion method. +// GetMetadata can provide a list of available versions. +func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + wrappedData := map[string]interface{}{ + "data": data, + } + + // Add options such as check-and-set, etc. + // We leave this as an optional arg so that most users + // can just pass plain key-value secret data without + // having to remember to put the extra layer "data" in there. + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, wrappedData) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: after writing to %s", ErrSecretNotFound, pathToWriteTo) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +// PutMetadata can be used to fully replace a subset of metadata fields for a +// given KV v2 secret. All fields will replace the corresponding values on the Vault server. +// Any fields left as nil will reset the field on the Vault server back to its zero value. 
+// +// To only partially replace the values of these metadata fields, use PatchMetadata. +// +// This method can also be used to create a new secret with just metadata and no secret data yet. +func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error { + pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // convert values to a map we can pass to Logical + metadataMap := make(map[string]interface{}) + metadataMap[maxVersionsKey] = metadata.MaxVersions + metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String() + metadataMap[casRequiredKey] = metadata.CASRequired + metadataMap[customMetadataKey] = metadata.CustomMetadata + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap) + if err != nil { + return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Patch additively updates the most recent version of a key-value secret, +// differentiating it from Put which will fully overwrite the previous data. +// Only the key-value pairs that are new or changing need to be provided. +// +// The WithMethod KVOption function can optionally be passed to dictate which +// kind of patch to perform, as older Vault server versions (pre-1.9.0) may +// only be able to use the old "rw" (read-then-write) style of partial update, +// whereas newer Vault servers can use the default value of "patch" if the +// client token's policy has the "patch" capability. +func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + // determine patch method + var patchMethod string + var ok bool + for _, opt := range opts { + k, v := opt() + if k == "method" { + patchMethod, ok = v.(string) + if !ok { + return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"") + } + } + } + + // Determine which kind of patch to use, + // the newer HTTP Patch style or the older read-then-write style + var kvs *KVSecret + var err error + switch patchMethod { + case "rw": + kvs, err = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData) + case "patch": + kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + case "": + kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + default: + return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"") + } + if err != nil { + return nil, fmt.Errorf("unable to perform patch: %w", err) + } + if kvs == nil { + return nil, fmt.Errorf("no secret was written to %s", secretPath) + } + + return kvs, nil +} + +// PatchMetadata can be used to replace just a subset of a secret's +// metadata fields at a time, as opposed to PutMetadata which is used to +// completely replace all fields on the previous metadata. 
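The two patch flavours described above can be combined in practice: Patch sends only the changed data keys, and PatchMetadata sends only the metadata fields whose pointers are non-nil. A hedged sketch with assumed mount and secret paths:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	kv := client.KVv2("secret") // assumed mount path

	// Only send the keys that change; force the read-then-write style
	// for pre-1.9 servers that lack HTTP PATCH support.
	_, err = kv.Patch(ctx, "my-app/creds",
		map[string]interface{}{"ttl": "30m"},
		vault.WithMergeMethod(vault.KVMergeMethodReadWrite))
	if err != nil {
		log.Fatal(err)
	}

	// Pointer fields: only cas_required is sent; everything else stays as is.
	casRequired := true
	if err := kv.PatchMetadata(ctx, "my-app/creds", vault.KVMetadataPatchInput{
		CASRequired: &casRequired,
	}); err != nil {
		log.Fatal(err)
	}
}
```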
+func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error { + pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + md, err := toMetadataMap(metadata) + if err != nil { + return fmt.Errorf("unable to create map for JSON merge patch request: %w", err) + } + + _, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md) + if err != nil { + return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes the most recent version of a secret from the KV v2 +// secrets engine. To delete an older version, use DeleteVersions. +func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteVersions deletes the specified versions of a secret from the KV v2 +// secrets engine. To delete the latest version of a secret, just use Delete. +func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { + // verb and path are different when trying to delete past versions + pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath) + + if len(versions) == 0 { + return nil + } + + var versionsToDelete []string + for _, version := range versions { + versionsToDelete = append(versionsToDelete, strconv.Itoa(version)) + } + versionsMap := map[string]interface{}{ + "versions": versionsToDelete, + } + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDelete, versionsMap) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteMetadata deletes all versions and metadata of the secret at the +// given path. +func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err) + } + + return nil +} + +// Undelete undeletes the given versions of a secret, restoring the data +// so that it can be fetched again with Get requests. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. +func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error { + pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data) + if err != nil { + return fmt.Errorf("error undeleting secret metadata at %s: %w", pathToUndelete, err) + } + + return nil +} + +// Destroy permanently removes the specified secret versions' data +// from the Vault server. If no secret exists at the given path, no +// action will be taken. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. 
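DeleteVersions, Undelete, and Destroy all operate on explicit version numbers, which GetVersionsAsList or GetMetadata can be used to discover first. A brief illustrative sketch (paths and version numbers are assumptions):

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	kv := client.KVv2("secret") // assumed mount path

	// Soft-delete two old versions; their data can still be undeleted.
	if err := kv.DeleteVersions(ctx, "my-app/creds", []int{1, 2}); err != nil {
		log.Fatal(err)
	}

	// Restore version 2 so it can be read again.
	if err := kv.Undelete(ctx, "my-app/creds", []int{2}); err != nil {
		log.Fatal(err)
	}

	// Permanently remove version 1; this cannot be undone.
	if err := kv.Destroy(ctx, "my-app/creds", []int{1}); err != nil {
		log.Fatal(err)
	}
}
```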
+func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error { + pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data) + if err != nil { + return fmt.Errorf("error destroying secret metadata at %s: %w", pathToDestroy, err) + } + + return nil +} + +// Rollback can be used to roll a secret back to a previous +// non-deleted/non-destroyed version. That previous version becomes the +// next/newest version for the path. +func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) { + // First, do a read to get the current version for check-and-set + latest, err := kv.Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("unable to get latest version of secret: %w", err) + } + + // Make sure a value already exists + if latest == nil { + return nil, fmt.Errorf("no secret was found: %w", err) + } + + // Verify metadata found + if latest.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data") + } + + // Now run it again and read the version we want to roll back to + rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion) + if err != nil { + return nil, fmt.Errorf("unable to get previous version %d of secret: %w", toVersion, err) + } + + err = validateRollbackVersion(rollbackVersion) + if err != nil { + return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err) + } + + casVersion := latest.VersionMetadata.Version + kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion)) + if err != nil { + return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err) + } + + return kvs, nil +} + +func extractCustomMetadata(secret *Secret) map[string]interface{} { + // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key + customMetadataInterface, ok := secret.Data["custom_metadata"] + if !ok { + metadataInterface := secret.Data["metadata"] + metadataMap, ok := metadataInterface.(map[string]interface{}) + if !ok { + return nil + } + customMetadataInterface = metadataMap["custom_metadata"] + } + + cm, ok := customMetadataInterface.(map[string]interface{}) + if !ok { + return nil + } + + return cm +} + +func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) { + // A nil map is a valid value for data: secret.Data will be nil when this + // version of the secret has been deleted, but the metadata is still + // available. 
+ var data map[string]interface{} + if secret.Data != nil { + dataInterface, ok := secret.Data["data"] + if !ok { + return nil, fmt.Errorf("missing expected 'data' element") + } + + if dataInterface != nil { + data, ok = dataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'data' element: %T (%#v)", data, data) + } + } + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to get version metadata: %w", err) + } + + return &KVSecret{ + Data: data, + VersionMetadata: metadata, + Raw: secret, + }, nil +} + +func extractVersionMetadata(secret *Secret) (*KVVersionMetadata, error) { + var metadata *KVVersionMetadata + + if secret.Data == nil { + return nil, nil + } + + // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key + var metadataMap map[string]interface{} + metadataInterface, ok := secret.Data["metadata"] + if ok { + metadataMap, ok = metadataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) + } + } else { + metadataMap = secret.Data + } + + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(metadataMap) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into VersionMetadata: %w", err) + } + + return metadata, nil +} + +func extractFullMetadata(secret *Secret) (*KVMetadata, error) { + var metadata *KVMetadata + + if secret.Data == nil { + return nil, nil + } + + if versions, ok := secret.Data["versions"]; ok { + versionsMap := versions.(map[string]interface{}) + if len(versionsMap) > 0 { + for version, metadata := range versionsMap { + metadataMap := metadata.(map[string]interface{}) + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + versionInt, err := strconv.Atoi(version) + if err != nil { + return nil, fmt.Errorf("error converting version %s to integer: %w", version, err) + } + metadataMap["version"] = versionInt + versionsMap[version] = metadataMap // save the updated copy of the metadata map + } + } + secret.Data["versions"] = versionsMap // save the updated copy of the versions map + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeHookFunc(time.RFC3339), + mapstructure.StringToTimeDurationHookFunc(), + ), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(secret.Data) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into KVMetadata: %w", err) + } + + return metadata, nil +} + +func validateRollbackVersion(rollbackVersion *KVSecret) error { + // Make sure a value already exists + if rollbackVersion == nil || rollbackVersion.Data == nil { + return 
fmt.Errorf("no secret found") + } + + // Verify metadata found + if rollbackVersion.VersionMetadata == nil { + return fmt.Errorf("no version metadata found; rollback only works on existing data") + } + + // Verify it hasn't been deleted + if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() { + return fmt.Errorf("cannot roll back to a version that has been deleted") + } + + if rollbackVersion.VersionMetadata.Destroyed { + return fmt.Errorf("cannot roll back to a version that has been destroyed") + } + + // Verify old data found + if rollbackVersion.Data == nil { + return fmt.Errorf("no data found; rollback only works on existing data") + } + + return nil +} + +func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath) + + // take any other additional options provided + // and pass them along to the patch request + wrappedData := map[string]interface{}{ + "data": newData, + } + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) + if err != nil { + var re *ResponseError + + if errors.As(err, &re) { + switch re.StatusCode { + // 403 + case http.StatusForbidden: + return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err) + + // 404 + case http.StatusNotFound: + return nil, fmt.Errorf("%w: performing merge patch to %s", ErrSecretNotFound, pathToMergePatch) + + // 405 + case http.StatusMethodNotAllowed: + // If it's a 405, that probably means the server is running a pre-1.9 + // Vault version that doesn't support the HTTP PATCH method. + // Fall back to the old way of doing it. + return readThenWrite(ctx, client, mountPath, secretPath, newData) + } + } + + return nil, fmt.Errorf("error performing merge patch to %s: %w", pathToMergePatch, err) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) { + // First, read the secret. 
+ existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err) + } + + // Make sure the secret already exists + if existingVersion == nil || existingVersion.Data == nil { + return nil, fmt.Errorf("%w: at %s as part of read-then-write patch operation", ErrSecretNotFound, secretPath) + } + + // Verify existing secret has metadata + if existingVersion.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath) + } + + // Copy new data over with existing data + combinedData := existingVersion.Data + for k, v := range newData { + combinedData[k] = v + } + + updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version)) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err) + } + + return updatedSecret, nil +} + +func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) { + metadataMap := make(map[string]interface{}) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // The KVMetadataPatchInput struct is designed to have pointer fields so that + // the user can easily express the difference between explicitly setting a + // field back to its zero value (e.g. false), as opposed to just having + // the field remain unchanged (e.g. nil). This way, they only need to pass + // the fields they want to change. + if patchInput.MaxVersions != nil { + metadataMap[maxVersionsKey] = *(patchInput.MaxVersions) + } + if patchInput.CASRequired != nil { + metadataMap[casRequiredKey] = *(patchInput.CASRequired) + } + if patchInput.CustomMetadata != nil { + if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys + metadataMap[customMetadataKey] = nil + } else { + metadataMap[customMetadataKey] = patchInput.CustomMetadata + } + } + if patchInput.DeleteVersionAfter != nil { + metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String() + } + + return metadataMap, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go new file mode 100644 index 00000000000..5f90de00a20 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -0,0 +1,405 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var ( + ErrLifetimeWatcherMissingInput = errors.New("missing input") + ErrLifetimeWatcherMissingSecret = errors.New("missing secret") + ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable") + ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data") + + // Deprecated; kept for compatibility + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew + // messages on the channel. 
+ DefaultLifetimeWatcherRenewBuffer = 5 + // Deprecated: kept for backwards compatibility + DefaultRenewerRenewBuffer = 5 +) + +type RenewBehavior uint + +const ( + // RenewBehaviorIgnoreErrors means we will attempt to keep renewing until + // we hit the lifetime threshold. It also ignores errors stemming from + // passing a non-renewable lease in. In practice, this means you simply + // reauthenticate/refetch credentials when the watcher exits. This is the + // default. + RenewBehaviorIgnoreErrors RenewBehavior = iota + + // RenewBehaviorRenewDisabled turns off renewal attempts entirely. This + // allows you to simply watch lifetime and have the watcher return at a + // reasonable threshold without actually making Vault calls. + RenewBehaviorRenewDisabled + + // RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits + // on some kind of error + RenewBehaviorErrorOnErrors +) + +// LifetimeWatcher is a process for watching lifetime of a secret. +// +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() +// +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// `DoneCh` will return if renewal fails, or if the remaining lease duration is +// under a built-in threshold and either renewing is not extending it or +// renewing is disabled. In both cases, the caller should attempt a re-read of +// the secret. Clients should check the return value of the channel to see if +// renewal was successful. +type LifetimeWatcher struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + renewBehavior RenewBehavior + + stopped bool + stopCh chan struct{} + + errLifetimeWatcherNotRenewable error + errLifetimeWatcherNoSecretData error +} + +// LifetimeWatcherInput is used as input to the renew function. +type LifetimeWatcherInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. If specified, the + // minimum of this value and the remaining lease duration will be used + // for grace period calculations. + Increment int + + // RenewBehavior controls what happens when a renewal errors or the + // passed-in secret is not renewable. + RenewBehavior RenewBehavior +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. 
+ Secret *Secret +} + +// NewLifetimeWatcher creates a new renewer from the given input. +func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrLifetimeWatcherMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrLifetimeWatcherMissingSecret + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultLifetimeWatcherRenewBuffer + } + + return &LifetimeWatcher{ + client: c, + secret: secret, + increment: i.Increment, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + renewBehavior: i.RenewBehavior, + + stopped: false, + stopCh: make(chan struct{}), + + errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable, + errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData, + }, nil +} + +// Deprecated: exists only for backwards compatibility. Calls +// NewLifetimeWatcher, and sets compatibility flags. +func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + renewer, err := c.NewLifetimeWatcher(i) + if err != nil { + return nil, err + } + + renewer.renewBehavior = RenewBehaviorErrorOnErrors + renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable + renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData + return renewer, err +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If there is an error, this will be an error. +func (r *LifetimeWatcher) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *LifetimeWatcher) Stop() { + r.l.Lock() + defer r.l.Unlock() + + if !r.stopped { + close(r.stopCh) + r.stopped = true + } +} + +// Start starts a background process for watching the lifetime of this secret. +// If renewal is enabled, when the secret has auth data, this attempts to renew +// the auth (token); When the secret has a lease, this attempts to renew the +// lease. +func (r *LifetimeWatcher) Start() { + r.doneCh <- r.doRenew() +} + +// Renew is for compatibility with the legacy api.Renewer. Calling Renew +// simply chains to Start. +func (r *LifetimeWatcher) Renew() { + r.Start() +} + +type renewFunc func(string, int) (*Secret, error) + +// doRenew is a helper for renewing authentication. 
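The constructors and channels above are typically wired into a loop that re-reads the secret once the watcher exits, as the `RenewBehaviorIgnoreErrors` comment suggests. A sketch under those assumptions, outside the vendored diff; the leased path is hypothetical:

```go
package example

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

// watchAndRefetch keeps a leased secret renewed and re-reads it whenever the
// watcher exits (the lease stopped extending, or renewal is not possible).
func watchAndRefetch(client *vault.Client, path string) error {
	for {
		secret, err := client.Logical().Read(path)
		if err != nil {
			return err
		}

		watcher, err := client.NewLifetimeWatcher(&vault.LifetimeWatcherInput{
			Secret: secret,
		})
		if err != nil {
			return err
		}
		go watcher.Start()

		func() {
			defer watcher.Stop()
			for {
				select {
				case err := <-watcher.DoneCh():
					if err != nil {
						log.Printf("renewal stopped: %v", err)
					}
					return // loop around and re-read the secret
				case renewal := <-watcher.RenewCh():
					log.Printf("renewed lease %s", renewal.Secret.LeaseID)
				}
			}
		}()
	}
}
```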
+func (r *LifetimeWatcher) doRenew() error { + defaultInitialRetryInterval := 10 * time.Second + switch { + case r.secret.Auth != nil: + return r.doRenewWithOptions(true, !r.secret.Auth.Renewable, + r.secret.Auth.LeaseDuration, r.secret.Auth.ClientToken, + r.client.Auth().Token().RenewTokenAsSelf, defaultInitialRetryInterval) + default: + return r.doRenewWithOptions(false, !r.secret.Renewable, + r.secret.LeaseDuration, r.secret.LeaseID, + r.client.Sys().Renew, defaultInitialRetryInterval) + } +} + +func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, + renew renewFunc, initialRetryInterval time.Duration, +) error { + if credString == "" || + (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { + return r.errLifetimeWatcherNotRenewable + } + + initialTime := time.Now() + priorDuration := time.Duration(initLeaseDuration) * time.Second + r.calculateGrace(priorDuration, time.Duration(r.increment)*time.Second) + var errorBackoff backoff.BackOff + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + var remainingLeaseDuration time.Duration + fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now()) + var renewal *Secret + var err error + + switch { + case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: + // Can't or won't renew, just keep the same expiration so we exit + // when it's reauthentication time + remainingLeaseDuration = fallbackLeaseDuration + + default: + // Renew the token + renewal, err = renew(credString, r.increment) + if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { + if r.renewBehavior == RenewBehaviorErrorOnErrors { + if err != nil { + return err + } + if renewal == nil || (tokenMode && renewal.Auth == nil) { + return r.errLifetimeWatcherNoSecretData + } + } + + // Calculate remaining duration until initial token lease expires + remainingLeaseDuration = initialTime.Add(time.Duration(initLeaseDuration) * time.Second).Sub(time.Now()) + if errorBackoff == nil { + errorBackoff = &backoff.ExponentialBackOff{ + MaxElapsedTime: remainingLeaseDuration, + RandomizationFactor: backoff.DefaultRandomizationFactor, + InitialInterval: initialRetryInterval, + MaxInterval: 5 * time.Minute, + Multiplier: 2, + Clock: backoff.SystemClock, + } + errorBackoff.Reset() + } + break + } + errorBackoff = nil + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Possibly error if we are not renewable + if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) && + r.renewBehavior == RenewBehaviorErrorOnErrors { + return r.errLifetimeWatcherNotRenewable + } + + // Reset initial time + initialTime = time.Now() + + // Grab the lease duration + initLeaseDuration = renewal.LeaseDuration + if tokenMode { + initLeaseDuration = renewal.Auth.LeaseDuration + } + + remainingLeaseDuration = time.Duration(initLeaseDuration) * time.Second + } + + var sleepDuration time.Duration + + if errorBackoff != nil { + sleepDuration = errorBackoff.NextBackOff() + if sleepDuration == backoff.Stop { + return err + } + } else { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. 
+ if remainingLeaseDuration > priorDuration { + r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) + } + priorDuration = remainingLeaseDuration + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + } + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. This helps with short + // tokens; for example, you don't want a current lease duration of 4 + // seconds, a grace period of 3 seconds, and end up sleeping for more + // than three of those seconds and having a very small budget of time + // to renew. + if remainingLeaseDuration <= r.grace || remainingLeaseDuration-sleepDuration <= r.grace { + return nil + } + + timer := time.NewTimer(sleepDuration) + select { + case <-r.stopCh: + timer.Stop() + return nil + case <-timer.C: + continue + } + } +} + +// calculateGrace calculates the grace period based on the minimum of the +// remaining lease duration and the token increment value; it also adds some +// jitter to not have clients be in sync. +func (r *LifetimeWatcher) calculateGrace(leaseDuration, increment time.Duration) { + minDuration := leaseDuration + if minDuration > increment && increment > 0 { + minDuration = increment + } + + if minDuration <= 0 { + r.grace = 0 + return + } + + leaseNanos := float64(minDuration.Nanoseconds()) + jitterMax := 0.1 * leaseNanos + + // For a given lease duration, we want to allow 80-90% of that to elapse, + // so the remaining amount is the grace period + r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) +} + +type ( + Renewer = LifetimeWatcher + RenewerInput = LifetimeWatcherInput +) diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go new file mode 100644 index 00000000000..2c453897715 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -0,0 +1,400 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/hashicorp/errwrap" +) + +const ( + wrappedResponseLocation = "cubbyhole/response" +) + +var ( + // The default TTL that will be used with `sys/wrapping/wrap`, can be + // changed + DefaultWrappingTTL = "5m" + + // The default function used if no other function is set. It honors the env + // var to set the wrap TTL. The default wrap TTL will apply when when writing + // to `sys/wrapping/wrap` when the env var is not set. + DefaultWrappingLookupFunc = func(operation, path string) string { + if os.Getenv(EnvVaultWrapTTL) != "" { + return os.Getenv(EnvVaultWrapTTL) + } + + if (operation == http.MethodPut || operation == http.MethodPost) && path == "sys/wrapping/wrap" { + return DefaultWrappingTTL + } + + return "" + } +) + +// Logical is used to perform logical backend operations on Vault. +type Logical struct { + c *Client +} + +// Logical is used to return the client for logical-backend API calls. 
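To make the timing above concrete: calculateGrace takes the smaller of the remaining lease and the requested increment, and the grace lands between 10% and 20% of that; the renew loop then sleeps for two thirds of the lease plus one third of the grace. A small sketch that only reproduces those formulas for a 60-second lease (not library code):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	lease := 60 * time.Second
	leaseNanos := float64(lease.Nanoseconds())

	// grace = 10% of the lease plus up to another 10% of jitter: 6s..12s here.
	jitterMax := 0.1 * leaseNanos
	grace := time.Duration(jitterMax) + time.Duration(uint64(rand.Int63())%uint64(jitterMax))

	// sleep = 2/3 of the lease + 1/3 of the grace: roughly 42s..44s here, so
	// the watcher wakes up safely inside the grace window to renew or exit.
	sleep := time.Duration(leaseNanos*2/3 + float64(grace.Nanoseconds())/3)

	fmt.Printf("grace=%s sleep=%s\n", grace, sleep)
}
```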
+func (c *Client) Logical() *Logical { + return &Logical{c: c} +} + +func (c *Logical) Read(path string) (*Secret, error) { + return c.ReadWithDataWithContext(context.Background(), path, nil) +} + +func (c *Logical) ReadWithContext(ctx context.Context, path string) (*Secret, error) { + return c.ReadWithDataWithContext(ctx, path, nil) +} + +func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { + return c.ReadWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.readRawWithDataWithContext(ctx, path, data) + return c.ParseRawResponseAndCloseBody(resp, err) +} + +// ReadRaw attempts to read the value stored at the given Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use ReadRawWithContext +// instead and set the timeout through context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRaw(path string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, nil) +} + +// ReadRawWithContext attempts to read the value stored at the give Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRawWithContext(ctx context.Context, path string) (*Response, error) { + return c.ReadRawWithDataWithContext(ctx, path, nil) +} + +// ReadRawWithData attempts to read the value stored at the given Vault +// path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' map +// is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use +// ReadRawWithDataWithContext instead and set the timeout through +// context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, data) +} + +// ReadRawWithDataWithContext attempts to read the value stored at the given +// Vault path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' +// map is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. 
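The data map on the read calls above becomes query parameters on the GET request. A sketch of reading a specific KV v2 version that way, outside the vendored diff; the mount and path are hypothetical, and the address and token are assumed to come from the usual `VAULT_ADDR`/`VAULT_TOKEN` environment variables:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig honours VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "version" is passed through as ?version=2 on the GET.
	secret, err := client.Logical().ReadWithDataWithContext(
		context.Background(),
		"secret/data/mimir/tls", // hypothetical KV v2 path
		map[string][]string{"version": {"2"}},
	)
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no secret at path")
	}
	fmt.Println(secret.Data)
}
```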
+func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + return c.readRawWithDataWithContext(ctx, path, data) +} + +func (c *Logical) ParseRawResponseAndCloseBody(resp *Response, err error) (*Secret, error) { + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + return c.c.RawRequestWithContext(ctx, r) +} + +func (c *Logical) List(path string) (*Secret, error) { + return c.ListWithContext(context.Background(), path) +} + +func (c *Logical) ListWithContext(ctx context.Context, path string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/"+path) + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + return c.WriteWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) + r.Headers.Set("Content-Type", "application/merge-patch+json") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) { + return c.WriteBytesWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteBytesWithContext(ctx context.Context, path string, data []byte) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.write(ctx, path, r) +} + +func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + if resp != nil { + defer resp.Body.Close() 
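Write and JSONMergePatch take the same shape of data map; the merge patch sends `Content-Type: application/merge-patch+json` so Vault merges the supplied keys rather than replacing the whole document. A sketch against a hypothetical KV v2 path, outside the vendored diff; whether PATCH is accepted depends on the Vault version and mount:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	path := "secret/data/mimir/tls" // hypothetical path

	// Full write: replaces the stored data.
	if _, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{
		"data": map[string]interface{}{"tls_key": "..."},
	}); err != nil {
		log.Fatal(err)
	}

	// Merge patch: only the listed keys change, everything else is kept.
	if _, err := client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{
		"data": map[string]interface{}{"tls_ca": "..."},
	}); err != nil {
		log.Fatal(err)
	}
}
```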
+ } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Delete(path string) (*Secret, error) { + return c.DeleteWithContext(context.Background(), path) +} + +func (c *Logical) DeleteWithContext(ctx context.Context, path string) (*Secret, error) { + return c.DeleteWithDataWithContext(ctx, path, nil) +} + +func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret, error) { + return c.DeleteWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) DeleteWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + return c.UnwrapWithContext(context.Background(), wrappingToken) +} + +func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + var data map[string]interface{} + wt := strings.TrimSpace(wrappingToken) + if wrappingToken != "" { + if c.c.Token() == "" { + c.c.SetToken(wt) + } else if wrappingToken != c.c.Token() { + data = map[string]interface{}{ + "token": wt, + } + } + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/wrapping/unwrap") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) + } + + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + + // Otherwise this might be an old-style wrapping token so attempt the old + // method + if wrappingToken != "" { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + c.c.SetToken(wrappingToken) + } + + secret, err = c.ReadWithContext(ctx, wrappedResponseLocation) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) + } + if secret.Data == nil { + return 
nil, fmt.Errorf("\"data\" not found in wrapping response") + } + if _, ok := secret.Data["response"]; !ok { + return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") + } + + wrappedSecret := new(Secret) + buf := bytes.NewBufferString(secret.Data["response"].(string)) + dec := json.NewDecoder(buf) + dec.UseNumber() + if err := dec.Decode(wrappedSecret); err != nil { + return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) + } + + return wrappedSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_policy.go b/vendor/github.com/hashicorp/vault/api/output_policy.go new file mode 100644 index 00000000000..85d1617e5e9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_policy.go @@ -0,0 +1,82 @@ +package api + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +const ( + ErrOutputPolicyRequest = "output a policy, please" +) + +var LastOutputPolicyError *OutputPolicyError + +type OutputPolicyError struct { + method string + path string + finalHCLString string +} + +func (d *OutputPolicyError) Error() string { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return err.Error() + } + d.finalHCLString = p + } + + return ErrOutputPolicyRequest +} + +func (d *OutputPolicyError) HCLString() (string, error) { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return "", err + } + d.finalHCLString = p + } + return d.finalHCLString, nil +} + +// Builds a sample policy document from the request +func (d *OutputPolicyError) buildSamplePolicy() (string, error) { + var capabilities []string + switch d.method { + case http.MethodGet, "": + capabilities = append(capabilities, "read") + case http.MethodPost, http.MethodPut: + capabilities = append(capabilities, "create") + capabilities = append(capabilities, "update") + case http.MethodPatch: + capabilities = append(capabilities, "patch") + case http.MethodDelete: + capabilities = append(capabilities, "delete") + case "LIST": + capabilities = append(capabilities, "list") + } + + // sanitize, then trim the Vault address and v1 from the front of the path + path, err := url.PathUnescape(d.path) + if err != nil { + return "", fmt.Errorf("failed to unescape request URL characters: %v", err) + } + + // determine whether to add sudo capability + if IsSudoPath(path) { + capabilities = append(capabilities, "sudo") + } + + // the OpenAPI response has a / in front of each path, + // but policies need the path without that leading slash + path = strings.TrimLeft(path, "/") + + capStr := strings.Join(capabilities, `", "`) + return fmt.Sprintf( + `path "%s" { + capabilities = ["%s"] +}`, path, capStr), nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go new file mode 100644 index 00000000000..80c591f20b5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -0,0 +1,95 @@ +package api + +import ( + "fmt" + "net/http" + "strings" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +const ( + ErrOutputStringRequest = "output a string, please" +) + +var LastOutputStringError *OutputStringError + +type OutputStringError struct { + *retryablehttp.Request + TLSSkipVerify bool + ClientCACert, ClientCAPath string + ClientCert, ClientKey string + finalCurlString string +} + +func (d *OutputStringError) Error() string { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return 
err.Error() + } + d.finalCurlString = cs + } + + return ErrOutputStringRequest +} + +func (d *OutputStringError) CurlString() (string, error) { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return "", err + } + d.finalCurlString = cs + } + return d.finalCurlString, nil +} + +func (d *OutputStringError) buildCurlString() (string, error) { + body, err := d.Request.BodyBytes() + if err != nil { + return "", err + } + + // Build cURL string + finalCurlString := "curl " + if d.TLSSkipVerify { + finalCurlString += "--insecure " + } + if d.Request.Method != http.MethodGet { + finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) + } + if d.ClientCACert != "" { + clientCACert := strings.ReplaceAll(d.ClientCACert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) + } + if d.ClientCAPath != "" { + clientCAPath := strings.ReplaceAll(d.ClientCAPath, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) + } + if d.ClientCert != "" { + clientCert := strings.ReplaceAll(d.ClientCert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert) + } + if d.ClientKey != "" { + clientKey := strings.ReplaceAll(d.ClientKey, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) + } + for k, v := range d.Request.Header { + for _, h := range v { + if strings.ToLower(k) == "x-vault-token" { + h = `$(vault print token)` + } + finalCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", finalCurlString, k, h) + } + } + + if len(body) > 0 { + // We need to escape single quotes since that's what we're using to + // quote the body + escapedBody := strings.ReplaceAll(string(body), "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) + } + + return fmt.Sprintf("%s%s", finalCurlString, d.Request.URL.String()), nil +} diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go new file mode 100644 index 00000000000..6602a044bd0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -0,0 +1,268 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "flag" + "net/url" + "os" + "regexp" + + squarejwt "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/errwrap" +) + +const ( + // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override + // setting a TLSProviderFunc for a plugin. + PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// sudoPaths is a map containing the paths that require a token's policy +// to have the "sudo" capability. The keys are the paths as strings, in +// the same format as they are returned by the OpenAPI spec. The values +// are the regular expressions that can be used to test whether a given +// path matches that path or not (useful specifically for the paths that +// contain templated fields.) 
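Unwrap first tries `sys/wrapping/unwrap` with the supplied token and only falls back to the legacy cubbyhole read on an old-style 404, so from the caller's side it is a single call. A sketch, outside the vendored diff, assuming the wrapping token arrives via a hypothetical environment variable:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The wrapping token is single-use; unwrapping returns the original response.
	secret, err := client.Logical().UnwrapWithContext(
		context.Background(),
		os.Getenv("WRAPPED_TOKEN"), // hypothetical variable carrying the wrapping token
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```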
+var sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/raw": regexp.MustCompile(`^/sys/raw$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), +} + +// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections +// back to Vault. +type PluginAPIClientMeta struct { + // These are set by the command line flags. 
+ flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +// FlagSet returns the flag set for configuring the TLS connection +func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +// GetTLSConfig will return a TLSConfig based off the values from the flags +func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} + +// VaultPluginTLSProvider wraps VaultPluginTLSProviderContext using context.Background. +func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + return VaultPluginTLSProviderContext(context.Background(), apiTLSConfig) +} + +// VaultPluginTLSProviderContext is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. +func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginAutoMTLSEnv) == "true" || os.Getenv(PluginMetadataModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) + } + + allClaims := make(map[string]interface{}) + if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { + return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) + } + + addrClaimRaw, ok := allClaims["addr"] + if !ok { + return nil, errors.New("could not validate addr claim") + } + vaultAddr, ok := addrClaimRaw.(string) + if !ok { + return nil, errors.New("could not parse addr claim") + } + if vaultAddr == "" { + return nil, errors.New(`no vault api_addr found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, errwrap.Wrapf("error parsing the vault api_addr: {{err}}", err) + } + + // Unwrap the token + clientConf := DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + // Reset token value to make sure nothing has been set by default + client.ClearToken() + + secret, err := client.Logical().UnwrapWithContext(ctx, unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := 
secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. + cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + ServerName: serverCert.Subject.CommonName, + } + + return tlsConfig, nil + } +} + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// Determine whether the given path requires the sudo capability +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. + for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/vault/api/plugin_types.go b/vendor/github.com/hashicorp/vault/api/plugin_types.go new file mode 100644 index 00000000000..3b85013b7eb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/plugin_types.go @@ -0,0 +1,63 @@ +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. 
Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go new file mode 100644 index 00000000000..a8e53c01e47 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -0,0 +1,146 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Host string + Params url.Values + Headers http.Header + ClientToken string + MFAHeaderVals []string + WrapTTL string + Obj interface{} + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 + + // Whether to request overriding soft-mandatory Sentinel policies (RGPs and + // EGPs). If set, the override flag will take effect for all policies + // evaluated during the request. + PolicyOverride bool +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. +func (r *Request) SetJSONBody(val interface{}) error { + buf, err := json.Marshal(val) + if err != nil { + return err + } + + r.Obj = val + r.BodyBytes = buf + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.BodyBytes == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. 
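IsSudoPath matches both literal and templated sudo paths via the regexps above, and ParsePluginType/String round-trip the plugin type names. A small sketch exercising both, outside the vendored diff:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Templated sudo path: /sys/leases/revoke-force/{prefix}.
	fmt.Println(vault.IsSudoPath("/sys/leases/revoke-force/prod/db")) // true
	fmt.Println(vault.IsSudoPath("/sys/health"))                      // false

	pt, err := vault.ParsePluginType("auth")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pt == vault.PluginTypeCredential, pt.String()) // true auth
}
```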
+func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) + if err != nil { + return nil, err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + return req, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go new file mode 100644 index 00000000000..a0e31144e56 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -0,0 +1,135 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes, otherwise, 429 is treated as quota limit reached. + if (r.StatusCode >= 200 && r.StatusCode < 400) || (r.StatusCode == 429 && r.Request.URL.Path == "/v1/sys/health") { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. + bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { + return err + } + + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + ns := r.Header.Get(NamespaceHeaderName) + + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + NamespacePath: ns, + } + + // Decode the error response if we can. 
Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) + dec.UseNumber() + if err := dec.Decode(&resp); err != nil { + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors + } + + return respErr +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. +type ErrorResponse struct { + Errors []string +} + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string + + // Namespace path to be reported to the client if it is set to anything other + // than root + NamespacePath string +} + +// Error returns a human-readable error string for the response error. +func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var ns string + if r.NamespacePath != "" && r.NamespacePath != "root/" { + ns = "Namespace: " + r.NamespacePath + "\n" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + ns+ + "URL: %s %s\n"+ + "Code: %d. %s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go new file mode 100644 index 00000000000..c45c4917cf1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -0,0 +1,382 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string `json:"request_id"` + + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. 
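When Vault answers with a non-success status, the client surfaces a *ResponseError carrying the method, URL, status code, and the decoded (or raw) error strings. A sketch of inspecting it, outside the vendored diff; the path is hypothetical, and checking with errors.As is an assumption about how the caller's error may be wrapped:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Logical().Read("secret/data/forbidden") // hypothetical path the token cannot read
	var respErr *vault.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println(respErr.StatusCode, respErr.Errors)
	} else if err != nil {
		fmt.Println("non-HTTP error:", err)
	}
}
```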
+ Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// TokenID returns the standardized token ID (token) for the given secret. +func (s *Secret) TokenID() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.ClientToken) > 0 { + return s.Auth.ClientToken, nil + } + + if s.Data == nil || s.Data["id"] == nil { + return "", nil + } + + id, ok := s.Data["id"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return id, nil +} + +// TokenAccessor returns the standardized token accessor for the given secret. +// If the secret is nil or does not contain an accessor, this returns the empty +// string. +func (s *Secret) TokenAccessor() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.Accessor) > 0 { + return s.Auth.Accessor, nil + } + + if s.Data == nil || s.Data["accessor"] == nil { + return "", nil + } + + accessor, ok := s.Data["accessor"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return accessor, nil +} + +// TokenRemainingUses returns the standardized remaining uses for the given +// secret. If the secret is nil or does not contain the "num_uses", this +// returns -1. On error, this will return -1 and a non-nil error. +func (s *Secret) TokenRemainingUses() (int, error) { + if s == nil || s.Data == nil || s.Data["num_uses"] == nil { + return -1, nil + } + + return parseutil.SafeParseInt(s.Data["num_uses"]) +} + +// TokenPolicies returns the standardized list of policies for the given secret. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. +func (s *Secret) TokenPolicies() ([]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Policies) > 0 { + return s.Auth.Policies, nil + } + + if s.Data == nil || s.Data["policies"] == nil { + return nil, nil + } + + var tokenPolicies []string + + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } + } + +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + _, ok := s.Data["identity_policies"] + if !ok { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) + } + } + +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) 
+ + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + + return policies, nil +} + +// TokenMetadata returns the map of metadata associated with this token, if any +// exists. If the secret is nil or does not contain the "metadata" key, this +// returns nil. +func (s *Secret) TokenMetadata() (map[string]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Metadata) > 0 { + return s.Auth.Metadata, nil + } + + if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) { + return nil, nil + } + + data, ok := s.Data["metadata"].(map[string]interface{}) + if !ok { + data, ok = s.Data["meta"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert metadata field to expected format") + } + } + + metadata := make(map[string]string, len(data)) + for k, v := range data { + typed, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert metadata value %v to string", v) + } + metadata[k] = typed + } + + return metadata, nil +} + +// TokenIsRenewable returns the standardized token renewability for the given +// secret. If the secret is nil or does not contain the "renewable" key, this +// returns false. +func (s *Secret) TokenIsRenewable() (bool, error) { + if s == nil { + return false, nil + } + + if s.Auth != nil && s.Auth.Renewable { + return s.Auth.Renewable, nil + } + + if s.Data == nil || s.Data["renewable"] == nil { + return false, nil + } + + renewable, err := parseutil.ParseBool(s.Data["renewable"]) + if err != nil { + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) + } + + return renewable, nil +} + +// TokenTTL returns the standardized remaining token TTL for the given secret. +// If the secret is nil or does not contain a TTL, this returns 0. +func (s *Secret) TokenTTL() (time.Duration, error) { + if s == nil { + return 0, nil + } + + if s.Auth != nil && s.Auth.LeaseDuration > 0 { + return time.Duration(s.Auth.LeaseDuration) * time.Second, nil + } + + if s.Data == nil || s.Data["ttl"] == nil { + return 0, nil + } + + ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"]) + if err != nil { + return 0, err + } + + return ttl, nil +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime time.Time `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor"` +} + +type MFAMethodID struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` + UsesPasscode bool `json:"uses_passcode,omitempty"` + Name string `json:"name,omitempty"` +} + +type MFAConstraintAny struct { + Any []*MFAMethodID `json:"any,omitempty"` +} + +type MFARequirement struct { + MFARequestID string `json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"` +} + +// SecretAuth is the structure containing auth information if we have it. 
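The Token* helpers above normalize token details regardless of whether they arrive under Auth or under Data (for example from a token lookup). A sketch, outside the vendored diff, assuming the token self-lookup helper from the same package:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Assumed helper from the same package: looks up the client's own token.
	secret, err := client.Auth().Token().LookupSelf()
	if err != nil {
		log.Fatal(err)
	}

	ttl, _ := secret.TokenTTL()
	renewable, _ := secret.TokenIsRenewable()
	policies, _ := secret.TokenPolicies()
	fmt.Println(ttl, renewable, policies)
}
```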
+type SecretAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies"` + IdentityPolicies []string `json:"identity_policies"` + Metadata map[string]string `json:"metadata"` + Orphan bool `json:"orphan"` + EntityID string `json:"entity_id"` + + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + MFARequirement *MFARequirement `json:"mfa_requirement"` +} + +// ParseSecret is used to parse a secret value from JSON from an io.Reader. +func ParseSecret(r io.Reader) (*Secret, error) { + // First read the data into a buffer. Not super efficient but we want to + // know if we actually have a body or not. + var buf bytes.Buffer + + // io.Reader is treated like a stream and cannot be read + // multiple times. Duplicating this stream using TeeReader + // to use this data in case there is no top-level data from + // api response + var teebuf bytes.Buffer + tee := io.TeeReader(r, &teebuf) + + _, err := buf.ReadFrom(tee) + if err != nil { + return nil, err + } + if buf.Len() == 0 { + return nil, nil + } + + // First decode the JSON into a map[string]interface{} + var secret Secret + dec := json.NewDecoder(&buf) + dec.UseNumber() + if err := dec.Decode(&secret); err != nil { + return nil, err + } + + // If the secret is null, add raw data to secret data if present + if reflect.DeepEqual(secret, Secret{}) { + data := make(map[string]interface{}) + dec := json.NewDecoder(&teebuf) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return nil, err + } + errRaw, errPresent := data["errors"] + + // if only errors are present in the resp.Body return nil + // to return value not found as it does not have any raw data + if len(data) == 1 && errPresent { + return nil, nil + } + + // if errors are present along with raw data return the error + if errPresent { + var errStrArray []string + errBytes, err := json.Marshal(errRaw) + if err != nil { + return nil, err + } + if err := json.Unmarshal(errBytes, &errStrArray); err != nil { + return nil, err + } + return nil, fmt.Errorf(strings.Join(errStrArray, " ")) + } + + // if any raw data is present in resp.Body, add it to secret + if len(data) > 0 { + secret.Data = data + } + } + + return &secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go new file mode 100644 index 00000000000..b832e274829 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -0,0 +1,75 @@ +package api + +import ( + "context" + "fmt" + "net/http" +) + +// SSH is used to return a client to invoke operations on SSH backend. +type SSH struct { + c *Client + MountPoint string +} + +// SSH returns the client for logical-backend API calls. +func (c *Client) SSH() *SSH { + return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHWithMountPoint returns the client with specific SSH mount point. +func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { + return &SSH{ + c: c, + MountPoint: mountPoint, + } +} + +// Credential wraps CredentialWithContext using context.Background. +func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { + return c.CredentialWithContext(context.Background(), role, data) +} + +// CredentialWithContext invokes the SSH backend API to create a credential to establish an SSH session. 
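ParseSecret accepts any io.Reader of a Vault JSON response, tolerating both a regular secret body and raw top-level data. A tiny sketch with an inline JSON document, outside the vendored diff:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	raw := `{"request_id":"1234","lease_id":"","renewable":false,"lease_duration":0,
	         "data":{"tls_cert":"-----BEGIN CERTIFICATE-----..."}}`

	secret, err := vault.ParseSecret(strings.NewReader(raw))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["tls_cert"])
}
```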
+func (c *SSH) CredentialWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// SignKey wraps SignKeyWithContext using context.Background. +func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + return c.SignKeyWithContext(context.Background(), role, data) +} + +// SignKeyWithContext signs the given public key and returns a signed public key to pass +// along with the SSH request. +func (c *SSH) SignKeyWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go new file mode 100644 index 00000000000..c67b80dc4c1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -0,0 +1,273 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + multierror "github.com/hashicorp/go-multierror" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. + VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. + Message string `json:"message" mapstructure:"message"` + + // Username associated with the OTP + Username string `json:"username" mapstructure:"username"` + + // IP associated with the OTP + IP string `json:"ip" mapstructure:"ip"` + + // Name of the role against which the OTP was issued + RoleName string `json:"role_name" mapstructure:"role_name"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. 
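SignKey posts to `<mount>/sign/<role>` and returns the signed certificate in the response data. A sketch outside the vendored diff, assuming an SSH CA secrets engine at a hypothetical mount, a hypothetical role, and the conventional `public_key`/`signed_key` field names (not confirmed by this diff):

```go
package main

import (
	"fmt"
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	pub, err := os.ReadFile("id_ed25519.pub") // hypothetical public key file
	if err != nil {
		log.Fatal(err)
	}

	signed, err := client.SSHWithMountPoint("ssh-client-signer").SignKey("my-role",
		map[string]interface{}{"public_key": string(pub)}) // assumed request field name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed.Data["signed_key"]) // assumed response field name
}
```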
+type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + Namespace string `hcl:"namespace"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + AllowedRoles string `hcl:"allowed_roles"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` + TLSServerName string `hcl:"tls_server_name"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. +func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + ServerName: c.TLSServerName, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// Returns true if any of the following conditions are true: +// - CA cert is configured +// - CA path is configured +// - configured to skip certificate verification +// - TLS server name is configured +func (c *SSHHelperConfig) shouldSetTLSParameters() bool { + return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. + clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. + clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.shouldSetTLSParameters() { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + // Configure namespace + if c.Namespace != "" { + client.SetNamespace(c.Namespace) + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. +// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. 
+func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(string(contents)) + if err != nil { + return nil, errwrap.Wrapf("error parsing config: {{err}}", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing config: file doesn't contain a root object") + } + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "namespace", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "allowed_roles", + "tls_skip_verify", + "tls_server_name", + } + if err := CheckHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf(`missing config "vault_addr"`) + } + return &c, nil +} + +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} + +// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at default path ("ssh"). +func (c *Client) SSHHelper() *SSHHelper { + return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at a specific mount point. +func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { + return &SSHHelper{ + c: c, + MountPoint: mountPoint, + } +} + +// Verify verifies if the key provided by user is present in Vault server. The response +// will contain the IP address and username associated with the OTP. In case the +// OTP matches the echo request message, instead of searching an entry for the OTP, +// an echo response message is returned. This feature is used by ssh-helper to verify if +// its configured correctly. +func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + return c.VerifyWithContext(context.Background(), otp) +} + +// VerifyWithContext the same as Verify but with a custom context. 
+func (c *SSHHelper) VerifyWithContext(ctx context.Context, otp string) (*SSHVerifyResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + data := map[string]interface{}{ + "otp": otp, + } + verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) + r := c.c.NewRequest(http.MethodPut, verifyPath) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + + if secret.Data == nil { + return nil, nil + } + + var verifyResp SSHVerifyResponse + err = mapstructure.Decode(secret.Data, &verifyResp) + if err != nil { + return nil, err + } + return &verifyResp, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go new file mode 100644 index 00000000000..5fb111887c0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys.go @@ -0,0 +1,11 @@ +package api + +// Sys is used to perform system-related operations on Vault. +type Sys struct { + c *Client +} + +// Sys is used to return the client for sys-related API calls. +func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go new file mode 100644 index 00000000000..82d9aab0b7a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -0,0 +1,156 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + return c.AuditHashWithContext(context.Background(), path, input) +} + +func (c *Sys) AuditHashWithContext(ctx context.Context, path string, input string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } + + return hashStr, nil +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + return c.ListAuditWithContext(context.Background()) +} + +func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/audit") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*Audit{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use 
EnableAuditWithOptions instead +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string, +) error { + return c.EnableAuditWithOptions(path, &EnableAuditOptions{ + Type: auditType, + Description: desc, + Options: opts, + }) +} + +func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { + return c.EnableAuditWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuditWithOptionsWithContext(ctx context.Context, path string, options *EnableAuditOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + return c.DisableAuditWithContext(context.Background(), path) +} + +func (c *Sys) DisableAuditWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/audit/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Structures for the requests/response are all down here. They aren't +// individually documented because the map almost directly to the raw HTTP API +// documentation. Please refer to that documentation for more details. + +type EnableAuditOptions struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` +} + +type Audit struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` + Path string `json:"path" mapstructure:"path"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go new file mode 100644 index 00000000000..238bd5e468a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -0,0 +1,98 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListAuth() (map[string]*AuthMount, error) { + return c.ListAuthWithContext(context.Background()) +} + +func (c *Sys) ListAuthWithContext(ctx context.Context) (map[string]*AuthMount, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/auth") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*AuthMount{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuthWithOptions instead +func (c *Sys) EnableAuth(path, authType, desc string) error { + return c.EnableAuthWithOptions(path, &EnableAuthOptions{ + Type: authType, + 
Description: desc, + }) +} + +func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { + return c.EnableAuthWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuthWithOptionsWithContext(ctx context.Context, path string, options *EnableAuthOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/auth/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAuth(path string) error { + return c.DisableAuthWithContext(context.Background(), path) +} + +func (c *Sys) DisableAuthWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/auth/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Rather than duplicate, we can use modern Go's type aliasing +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go new file mode 100644 index 00000000000..af306a07f31 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -0,0 +1,77 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { + return c.CapabilitiesSelfWithContext(context.Background(), path) +} + +func (c *Sys) CapabilitiesSelfWithContext(ctx context.Context, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.CapabilitiesWithContext(ctx, c.c.Token(), path) +} + +func (c *Sys) Capabilities(token, path string) ([]string, error) { + return c.CapabilitiesWithContext(context.Background(), token, path) +} + +func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "token": token, + "path": path, + } + + reqPath := "/v1/sys/capabilities" + if token == c.c.Token() { + reqPath = fmt.Sprintf("%s-self", reqPath) + } + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go new file mode 100644 index 00000000000..1e2cda4f48c --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -0,0 +1,91 @@ +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + return c.CORSStatusWithContext(context.Background()) +} + +func (c *Sys) CORSStatusWithContext(ctx context.Context) (*CORSResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) error { + return c.ConfigureCORSWithContext(context.Background(), req) +} + +func (c *Sys) ConfigureCORSWithContext(ctx context.Context, req *CORSRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) DisableCORS() error { + return c.DisableCORSWithContext(context.Background()) +} + +func (c *Sys) DisableCORSWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type CORSRequest struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go new file mode 100644 index 00000000000..096cadb793d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -0,0 +1,195 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRootStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRootStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { 
+ return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootStatusCommonWithContext(ctx context.Context, path string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRootInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommonWithContext(ctx context.Context, path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + return c.GenerateRootCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenCancel() error { + return c.GenerateDROperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenCancel() error { + return c.GenerateRecoveryOperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRootCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) 
GenerateRecoveryOperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootCancelCommonWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateRootUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-recovery-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommonWithContext(ctx context.Context, path, shard, nonce string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go new file mode 100644 index 00000000000..d89d59651a9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -0,0 +1,43 @@ +package api + +import ( + "context" + "net/http" + "time" +) + +func (c *Sys) HAStatus() (*HAStatusResponse, error) { + return c.HAStatusWithContext(context.Background()) +} + +func (c *Sys) HAStatusWithContext(ctx context.Context) (*HAStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, 
"/v1/sys/ha-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HAStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HAStatusResponse struct { + Nodes []HANode +} + +type HANode struct { + Hostname string `json:"hostname"` + APIAddress string `json:"api_address"` + ClusterAddress string `json:"cluster_address"` + ActiveNode bool `json:"active_node"` + LastEcho *time.Time `json:"last_echo"` + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go new file mode 100644 index 00000000000..953c1c21eaa --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -0,0 +1,49 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) Health() (*HealthResponse, error) { + return c.HealthWithContext(context.Background()) +} + +func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/health") + // If the code is 400 or above it will automatically turn into an error, + // but the sys/health API defaults to returning 5xx when not sealed or + // inited, so we force this code to be something else so we parse correctly + r.Params.Add("uninitcode", "299") + r.Params.Add("sealedcode", "299") + r.Params.Add("standbycode", "299") + r.Params.Add("drsecondarycode", "299") + r.Params.Add("performancestandbycode", "299") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HealthResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go new file mode 100644 index 00000000000..05dea86f6ab --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -0,0 +1,74 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) InitStatus() (bool, error) { + return c.InitStatusWithContext(context.Background()) +} + +func (c *Sys) InitStatusWithContext(ctx context.Context) (bool, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + return c.InitWithContext(context.Background(), opts) +} + +func (c *Sys) InitWithContext(ctx context.Context, opts *InitRequest) (*InitResponse, error) { + ctx, 
cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys"` + RecoveryKeysB64 []string `json:"recovery_keys_base64"` + RootToken string `json:"root_token"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go new file mode 100644 index 00000000000..a74e206ebed --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -0,0 +1,41 @@ +package api + +import ( + "context" + "net/http" + "time" +) + +func (c *Sys) Leader() (*LeaderResponse, error) { + return c.LeaderWithContext(context.Background()) +} + +func (c *Sys) LeaderWithContext(ctx context.Context) (*LeaderResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/leader") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + ActiveTime time.Time `json:"active_time"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` + RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` + RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go new file mode 100644 index 00000000000..c02402f5314 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -0,0 +1,163 @@ +package api + +import ( + "context" + "errors" + "net/http" +) + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), id, increment) +} + +func (c *Sys) RenewWithContext(ctx context.Context, id string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/renew") + + body := map[string]interface{}{ + "increment": increment, + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Lookup(id string) (*Secret, 
error) { + return c.LookupWithContext(context.Background(), id) +} + +func (c *Sys) LookupWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/lookup") + + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + return c.RevokeWithContext(context.Background(), id) +} + +func (c *Sys) RevokeWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke") + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + return c.RevokePrefixWithContext(context.Background(), id) +} + +func (c *Sys) RevokePrefixWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-prefix/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + return c.RevokeForceWithContext(context.Background(), id) +} + +func (c *Sys) RevokeForceWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-force/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + return c.RevokeWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) RevokeWithOptionsWithContext(ctx context.Context, opts *RevokeOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if opts == nil { + return errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest(http.MethodPut, path) + if !opts.Force { + body := map[string]interface{}{ + "sync": opts.Sync, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type RevokeOptions struct { + LeaseID string + Force bool + Prefix bool + Sync bool +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mfa.go b/vendor/github.com/hashicorp/vault/api/sys_mfa.go new file mode 100644 index 00000000000..a1ba1bd80f9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mfa.go @@ -0,0 +1,45 @@ +package api + +import ( + "context" + "fmt" + "net/http" +) + +func (c *Sys) MFAValidate(requestID string, payload map[string]interface{}) (*Secret, error) { + return c.MFAValidateWithContext(context.Background(), requestID, payload) +} + +func (c *Sys) MFAValidateWithContext(ctx 
context.Context, requestID string, payload map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "mfa_request_id": requestID, + "mfa_payload": payload, + } + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mfa/validate")) + if err := r.SetJSONBody(body); err != nil { + return nil, fmt.Errorf("failed to set request body: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to parse secret from response: %w", err) + } + + if secret == nil { + return nil, fmt.Errorf("data from server response is empty") + } + + return secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go b/vendor/github.com/hashicorp/vault/api/sys_monitor.go new file mode 100644 index 00000000000..405d40f8efc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -0,0 +1,71 @@ +package api + +import ( + "bufio" + "context" + "fmt" + "net/http" +) + +// Monitor returns a channel that outputs strings containing the log messages +// coming from the server. +func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (chan string, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/monitor") + + if logLevel == "" { + r.Params.Add("log_level", "info") + } else { + r.Params.Add("log_level", logLevel) + } + + if logFormat == "" { + r.Params.Add("log_format", "standard") + } else { + r.Params.Add("log_format", logFormat) + } + + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + + go func() { + scanner := bufio.NewScanner(resp.Body) + droppedCount := 0 + + defer close(logCh) + defer resp.Body.Close() + + for { + if ctx.Err() != nil { + return + } + + if !scanner.Scan() { + return + } + + logMessage := scanner.Text() + + if droppedCount > 0 { + select { + case logCh <- fmt.Sprintf("Monitor dropped %d logs during monitor request\n", droppedCount): + droppedCount = 0 + default: + droppedCount++ + continue + } + } + + select { + case logCh <- logMessage: + default: + droppedCount++ + } + } + }() + + return logCh, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go new file mode 100644 index 00000000000..f55133cec4c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -0,0 +1,334 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + return c.ListMountsWithContext(context.Background()) +} + +func (c *Sys) ListMountsWithContext(ctx context.Context) (map[string]*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/mounts") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*MountOutput{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + 
+func (c *Sys) Mount(path string, mountInfo *MountInput) error { + return c.MountWithContext(context.Background(), path, mountInfo) +} + +func (c *Sys) MountWithContext(ctx context.Context, path string, mountInfo *MountInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(mountInfo); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + return c.UnmountWithContext(context.Background(), path) +} + +func (c *Sys) UnmountWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Remount wraps RemountWithContext using context.Background. +func (c *Sys) Remount(from, to string) error { + return c.RemountWithContext(context.Background(), from, to) +} + +// RemountWithContext kicks off a remount operation, polls the status endpoint using +// the migration ID till either success or failure state is observed +func (c *Sys) RemountWithContext(ctx context.Context, from, to string) error { + remountResp, err := c.StartRemountWithContext(ctx, from, to) + if err != nil { + return err + } + + for { + remountStatusResp, err := c.RemountStatusWithContext(ctx, remountResp.MigrationID) + if err != nil { + return err + } + if remountStatusResp.MigrationInfo.MigrationStatus == "success" { + return nil + } + if remountStatusResp.MigrationInfo.MigrationStatus == "failure" { + return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID) + } + time.Sleep(1 * time.Second) + } +} + +// StartRemount wraps StartRemountWithContext using context.Background. +func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { + return c.StartRemountWithContext(context.Background(), from, to) +} + +// StartRemountWithContext kicks off a mount migration and returns a response with the migration ID +func (c *Sys) StartRemountWithContext(ctx context.Context, from, to string) (*MountMigrationOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RemountStatus wraps RemountStatusWithContext using context.Background. 
+func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) { + return c.RemountStatusWithContext(context.Background(), migrationID) +} + +// RemountStatusWithContext checks the status of a mount migration operation with the provided ID +func (c *Sys) RemountStatusWithContext(ctx context.Context, migrationID string) (*MountMigrationStatusOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationStatusOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + return c.TuneMountWithContext(context.Background(), path, config) +} + +func (c *Sys) TuneMountWithContext(ctx context.Context, path string, config MountConfigInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(config); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { + return c.MountConfigWithContext(context.Background(), path) +} + +func (c *Sys) MountConfigWithContext(ctx context.Context, path string) (*MountConfigOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountConfigOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +type MountInput struct { + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + Options map[string]string `json:"options"` + + // Deprecated: Newer server responses should be returning this information in the + // Type field (json: "type") instead. 
+ PluginName string `json:"plugin_name,omitempty"` +} + +type MountConfigInput struct { + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + PluginVersion string `json:"plugin_version,omitempty"` + UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + // Deprecated: This field will always be blank for newer server responses. + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountOutput struct { + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"` + RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"` + RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"` + DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + // Deprecated: This field will always be blank for newer server responses. 
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type UserLockoutConfigInput struct { + LockoutThreshold string `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration string `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterResetDuration string `json:"lockout_counter_reset_duration,omitempty" structs:"lockout_counter_reset_duration" mapstructure:"lockout_counter_reset_duration"` + DisableLockout *bool `json:"lockout_disable,omitempty" structs:"lockout_disable" mapstructure:"lockout_disable"` +} + +type UserLockoutConfigOutput struct { + LockoutThreshold uint `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration int `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterReset int `json:"lockout_counter_reset,omitempty" structs:"lockout_counter_reset" mapstructure:"lockout_counter_reset"` + DisableLockout *bool `json:"disable_lockout,omitempty" structs:"disable_lockout" mapstructure:"disable_lockout"` +} + +type MountMigrationOutput struct { + MigrationID string `mapstructure:"migration_id"` +} + +type MountMigrationStatusOutput struct { + MigrationID string `mapstructure:"migration_id"` + MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"` +} + +type MountMigrationStatusInfo struct { + SourceMount string `mapstructure:"source_mount"` + TargetMount string `mapstructure:"target_mount"` + MigrationStatus string `mapstructure:"status"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go new file mode 100644 index 00000000000..05dce293998 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -0,0 +1,379 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/mitchellh/mapstructure" +) + +// ListPluginsInput is used as input to the ListPlugins function. +type ListPluginsInput struct { + // Type of the plugin. Required. + Type PluginType `json:"type"` +} + +// ListPluginsResponse is the response from the ListPlugins call. +type ListPluginsResponse struct { + // PluginsByType is the list of plugins by type. + PluginsByType map[PluginType][]string `json:"types"` + + Details []PluginDetails `json:"details,omitempty"` + + // Names is the list of names of the plugins. + // + // Deprecated: Newer server responses should be returning PluginsByType (json: + // "types") instead. + Names []string `json:"names"` +} + +type PluginDetails struct { + Type string `json:"type"` + Name string `json:"name"` + Version string `json:"version,omitempty"` + Builtin bool `json:"builtin"` + DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` +} + +// ListPlugins wraps ListPluginsWithContext using context.Background. +func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + return c.ListPluginsWithContext(context.Background(), i) +} + +// ListPluginsWithContext lists all plugins in the catalog and returns their names as a +// list of strings. 
+func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (*ListPluginsResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/catalog")) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + result := &ListPluginsResponse{ + PluginsByType: make(map[PluginType][]string), + } + switch i.Type { + case PluginTypeUnknown: + for _, pluginType := range PluginTypes { + pluginsRaw, ok := secret.Data[pluginType.String()] + if !ok { + continue + } + + pluginsIfc, ok := pluginsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginType.String()) + } + + plugins := make([]string, 0, len(pluginsIfc)) + for _, nameIfc := range pluginsIfc { + name, ok := nameIfc.(string) + if !ok { + continue + } + plugins = append(plugins, name) + } + result.PluginsByType[pluginType] = plugins + } + default: + pluginsRaw, ok := secret.Data[i.Type.String()] + if !ok { + return nil, fmt.Errorf("no %s entry in returned data", i.Type.String()) + } + + var respKeys []string + if err := mapstructure.Decode(pluginsRaw, &respKeys); err != nil { + return nil, err + } + result.PluginsByType[i.Type] = respKeys + } + + if detailed, ok := secret.Data["detailed"]; ok { + var details []PluginDetails + if err := mapstructure.Decode(detailed, &details); err != nil { + return nil, err + } + + switch i.Type { + case PluginTypeUnknown: + result.Details = details + default: + // Filter for just the queried type. + for _, entry := range details { + if entry.Type == i.Type.String() { + result.Details = append(result.Details, entry) + } + } + } + } + + return result, nil +} + +// GetPluginInput is used as input to the GetPlugin function. +type GetPluginInput struct { + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + Version string `json:"version"` +} + +// GetPluginResponse is the response from the GetPlugin call. +type GetPluginResponse struct { + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` + DeprecationStatus string `json:"deprecation_status,omitempty"` + Version string `json:"version,omitempty"` +} + +// GetPlugin wraps GetPluginWithContext using context.Background. +func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + return c.GetPluginWithContext(context.Background(), i) +} + +// GetPluginWithContext retrieves information about the plugin. 
+func (c *Sys) GetPluginWithContext(ctx context.Context, i *GetPluginInput) (*GetPluginResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + if i.Version != "" { + req.Params.Set("version", i.Version) + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginInput is used as input to the RegisterPlugin function. +type RegisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + + // Args is the list of args to spawn the process with. + Args []string `json:"args,omitempty"` + + // Command is the command to run. + Command string `json:"command,omitempty"` + + // SHA256 is the shasum of the plugin. + SHA256 string `json:"sha256,omitempty"` + + // Version is the optional version of the plugin being registered + Version string `json:"version,omitempty"` +} + +// RegisterPlugin wraps RegisterPluginWithContext using context.Background. +func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + return c.RegisterPluginWithContext(context.Background(), i) +} + +// RegisterPluginWithContext registers the plugin with the given information. +func (c *Sys) RegisterPluginWithContext(ctx context.Context, i *RegisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginInput is used as input to the DeregisterPlugin function. +type DeregisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + + // Version of the plugin. Optional. + Version string `json:"version,omitempty"` +} + +// DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. +func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + return c.DeregisterPluginWithContext(context.Background(), i) +} + +// DeregisterPluginWithContext removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + req.Params.Set("version", i.Version) + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// ReloadPluginInput is used as input to the ReloadPlugin function. +type ReloadPluginInput struct { + // Plugin is the name of the plugin to reload, as registered in the plugin catalog + Plugin string `json:"plugin"` + + // Mounts is the array of string mount paths of the plugin backends to reload + Mounts []string `json:"mounts"` + + // Scope is the scope of the plugin reload + Scope string `json:"scope"` +} + +// ReloadPlugin wraps ReloadPluginWithContext using context.Background. 
+func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { + return c.ReloadPluginWithContext(context.Background(), i) +} + +// ReloadPluginWithContext reloads mounted plugin backends, possibly returning +// reloadId for a cluster scoped reload +func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := "/v1/sys/plugins/reload/backend" + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if i.Scope == "global" { + // Get the reload id + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return "", parseErr + } + if _, ok := secret.Data["reload_id"]; ok { + return secret.Data["reload_id"].(string), nil + } + } + return "", err +} + +// ReloadStatus is the status of an individual node's plugin reload +type ReloadStatus struct { + Timestamp time.Time `json:"timestamp" mapstructure:"timestamp"` + Error string `json:"error" mapstructure:"error"` +} + +// ReloadStatusResponse is the combined response of all known completed plugin reloads +type ReloadStatusResponse struct { + ReloadID string `mapstructure:"reload_id"` + Results map[string]*ReloadStatus `mapstructure:"results"` +} + +// ReloadPluginStatusInput is used as input to the ReloadStatusPlugin function. +type ReloadPluginStatusInput struct { + // ReloadID is the ID of the reload operation + ReloadID string `json:"reload_id"` +} + +// ReloadPluginStatus wraps ReloadPluginStatusWithContext using context.Background. +func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + return c.ReloadPluginStatusWithContext(context.Background(), reloadStatusInput) +} + +// ReloadPluginStatusWithContext retrieves the status of a reload operation +func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := "/v1/sys/plugins/reload/backend/status" + req := c.c.NewRequest(http.MethodGet, path) + req.Params.Add("reload_id", reloadStatusInput.ReloadID) + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp != nil { + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return nil, err + } + + var r ReloadStatusResponse + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &r, + }) + if err != nil { + return nil, err + } + err = d.Decode(secret.Data) + if err != nil { + return nil, err + } + return &r, nil + } + return nil, nil +} + +// catalogPathByType is a helper to construct the proper API path by plugin type +func catalogPathByType(pluginType PluginType, name string) string { + path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) + + // Backwards compat, if type is not provided then use old path + if pluginType == PluginTypeUnknown { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) + } + + return path +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go new file mode 100644 index 00000000000..4a4f91b08c7 --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -0,0 +1,134 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListPolicies() ([]string, error) { + return c.ListPoliciesWithContext(context.Background()) +} + +func (c *Sys) ListPoliciesWithContext(ctx context.Context) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result []string + err = mapstructure.Decode(secret.Data["keys"], &result) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + return c.GetPolicyWithContext(context.Background(), name) +} + +func (c *Sys) GetPolicyWithContext(ctx context.Context, name string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + if policyRaw, ok := secret.Data["policy"]; ok { + return policyRaw.(string), nil + } + + return "", fmt.Errorf("no policy found in response") +} + +func (c *Sys) PutPolicy(name, rules string) error { + return c.PutPolicyWithContext(context.Background(), name, rules) +} + +func (c *Sys) PutPolicyWithContext(ctx context.Context, name, rules string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "policy": rules, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + return c.DeletePolicyWithContext(context.Background(), name) +} + +func (c *Sys) DeletePolicyWithContext(ctx context.Context, name string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go new file mode 100644 index 00000000000..7806a1418df --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -0,0 +1,382 @@ +package api + +import 
( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/mitchellh/mapstructure" +) + +var ErrIncompleteSnapshot = errors.New("incomplete snapshot, unable to read SHA256SUMS.sealed file") + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` + AutoJoinScheme string `json:"auto_join_scheme"` + AutoJoinPort uint `json:"auto_join_port"` + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` +} + +// AutopilotConfig is used for querying/setting the Autopilot configuration. +type AutopilotConfig struct { + CleanupDeadServers bool `json:"cleanup_dead_servers" mapstructure:"cleanup_dead_servers"` + LastContactThreshold time.Duration `json:"last_contact_threshold" mapstructure:"-"` + DeadServerLastContactThreshold time.Duration `json:"dead_server_last_contact_threshold" mapstructure:"-"` + MaxTrailingLogs uint64 `json:"max_trailing_logs" mapstructure:"max_trailing_logs"` + MinQuorum uint `json:"min_quorum" mapstructure:"min_quorum"` + ServerStabilizationTime time.Duration `json:"server_stabilization_time" mapstructure:"-"` + DisableUpgradeMigration bool `json:"disable_upgrade_migration" mapstructure:"disable_upgrade_migration"` +} + +// MarshalJSON makes the autopilot config fields JSON compatible +func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "cleanup_dead_servers": ac.CleanupDeadServers, + "last_contact_threshold": ac.LastContactThreshold.String(), + "dead_server_last_contact_threshold": ac.DeadServerLastContactThreshold.String(), + "max_trailing_logs": ac.MaxTrailingLogs, + "min_quorum": ac.MinQuorum, + "server_stabilization_time": ac.ServerStabilizationTime.String(), + "disable_upgrade_migration": ac.DisableUpgradeMigration, + }) +} + +// UnmarshalJSON parses the autopilot config JSON blob +func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error { + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + return err + } + + conf := data.(map[string]interface{}) + if err = mapstructure.WeakDecode(conf, ac); err != nil { + return err + } + if ac.LastContactThreshold, err = parseutil.ParseDurationSecond(conf["last_contact_threshold"]); err != nil { + return err + } + if ac.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(conf["dead_server_last_contact_threshold"]); err != nil { + return err + } + if ac.ServerStabilizationTime, err = parseutil.ParseDurationSecond(conf["server_stabilization_time"]); err != nil { + return err + } + return nil +} + +// AutopilotState represents the response of the raft autopilot state API +type AutopilotState struct { + Healthy bool `mapstructure:"healthy"` + FailureTolerance int `mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer `mapstructure:"servers"` + Leader string `mapstructure:"leader"` + Voters []string `mapstructure:"voters"` + NonVoters []string `mapstructure:"non_voters"` + RedundancyZones map[string]AutopilotZone `mapstructure:"redundancy_zones,omitempty"` + Upgrade 
*AutopilotUpgrade `mapstructure:"upgrade_info,omitempty"` + OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"` +} + +// AutopilotServer represents the server blocks in the response of the raft +// autopilot state API. +type AutopilotServer struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + Address string `mapstructure:"address"` + NodeStatus string `mapstructure:"node_status"` + LastContact string `mapstructure:"last_contact"` + LastTerm uint64 `mapstructure:"last_term"` + LastIndex uint64 `mapstructure:"last_index"` + Healthy bool `mapstructure:"healthy"` + StableSince string `mapstructure:"stable_since"` + Status string `mapstructure:"status"` + Version string `mapstructure:"version"` + UpgradeVersion string `mapstructure:"upgrade_version,omitempty"` + RedundancyZone string `mapstructure:"redundancy_zone,omitempty"` + NodeType string `mapstructure:"node_type,omitempty"` +} + +type AutopilotZone struct { + Servers []string `mapstructure:"servers,omitempty"` + Voters []string `mapstructure:"voters,omitempty"` + FailureTolerance int `mapstructure:"failure_tolerance,omitempty"` +} + +type AutopilotUpgrade struct { + Status string `mapstructure:"status"` + TargetVersion string `mapstructure:"target_version,omitempty"` + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + TargetVersionReadReplicas []string `mapstructure:"target_version_read_replicas,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` + OtherVersionReadReplicas []string `mapstructure:"other_version_read_replicas,omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"` +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` +} + +// RaftJoin wraps RaftJoinWithContext using context.Background. +func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { + return c.RaftJoinWithContext(context.Background(), opts) +} + +// RaftJoinWithContext adds the node from which this call is invoked from to the raft +// cluster represented by the leader address in the parameter. +func (c *Sys) RaftJoinWithContext(ctx context.Context, opts *RaftJoinRequest) (*RaftJoinResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/join") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RaftJoinResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +// RaftSnapshot wraps RaftSnapshotWithContext using context.Background. +func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { + return c.RaftSnapshotWithContext(context.Background(), snapWriter) +} + +// RaftSnapshotWithContext invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. 
+func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) error { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/snapshot") + r.URL.RawQuery = r.Params.Encode() + + resp, err := c.c.httpRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + // Make sure that the last file in the archive, SHA256SUMS.sealed, is present + // and non-empty. This is to catch cases where the snapshot failed midstream, + // e.g. due to a problem with the seal that prevented encryption of that file. + var wg sync.WaitGroup + wg.Add(1) + var verified bool + + rPipe, wPipe := io.Pipe() + dup := io.TeeReader(resp.Body, wPipe) + go func() { + defer func() { + io.Copy(ioutil.Discard, rPipe) + rPipe.Close() + wg.Done() + }() + + uncompressed, err := gzip.NewReader(rPipe) + if err != nil { + return + } + + t := tar.NewReader(uncompressed) + var h *tar.Header + for { + h, err = t.Next() + if err != nil { + return + } + if h.Name != "SHA256SUMS.sealed" { + continue + } + var b []byte + b, err = ioutil.ReadAll(t) + if err != nil || len(b) == 0 { + return + } + verified = true + return + } + }() + + // Copy bytes from dup to snapWriter. This will have a side effect that + // everything read from dup will be written to wPipe. + _, err = io.Copy(snapWriter, dup) + wPipe.Close() + if err != nil { + rPipe.CloseWithError(err) + return err + } + wg.Wait() + + if !verified { + return ErrIncompleteSnapshot + } + return nil +} + +// RaftSnapshotRestore wraps RaftSnapshotRestoreWithContext using context.Background. +func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error { + return c.RaftSnapshotRestoreWithContext(context.Background(), snapReader, force) +} + +// RaftSnapshotRestoreWithContext reads the snapshot from the io.Reader and installs that +// snapshot, returning the cluster to the state defined by it. +func (c *Sys) RaftSnapshotRestoreWithContext(ctx context.Context, snapReader io.Reader, force bool) error { + path := "/v1/sys/storage/raft/snapshot" + if force { + path = "/v1/sys/storage/raft/snapshot-force" + } + + r := c.c.NewRequest(http.MethodPost, path) + r.Body = snapReader + + resp, err := c.c.httpRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RaftAutopilotState wraps RaftAutopilotStateWithContext using context.Background. +func (c *Sys) RaftAutopilotState() (*AutopilotState, error) { + return c.RaftAutopilotStateWithContext(context.Background()) +} + +// RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot. +func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotState + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RaftAutopilotConfiguration wraps RaftAutopilotConfigurationWithContext using context.Background. 
+func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) { + return c.RaftAutopilotConfigurationWithContext(context.Background()) +} + +// RaftAutopilotConfigurationWithContext fetches the autopilot config. +func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotConfig + if err = mapstructure.Decode(secret.Data, &result); err != nil { + return nil, err + } + if result.LastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["last_contact_threshold"]); err != nil { + return nil, err + } + if result.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["dead_server_last_contact_threshold"]); err != nil { + return nil, err + } + if result.ServerStabilizationTime, err = parseutil.ParseDurationSecond(secret.Data["server_stabilization_time"]); err != nil { + return nil, err + } + + return &result, err +} + +// PutRaftAutopilotConfiguration wraps PutRaftAutopilotConfigurationWithContext using context.Background. +func (c *Sys) PutRaftAutopilotConfiguration(opts *AutopilotConfig) error { + return c.PutRaftAutopilotConfigurationWithContext(context.Background(), opts) +} + +// PutRaftAutopilotConfigurationWithContext allows modifying the raft autopilot configuration +func (c *Sys) PutRaftAutopilotConfigurationWithContext(ctx context.Context, opts *AutopilotConfig) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/autopilot/configuration") + + if err := r.SetJSONBody(opts); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go new file mode 100644 index 00000000000..2ac8a4743bc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -0,0 +1,479 @@ +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := 
c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyRecoveryKeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyRecoveryKeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + return c.RekeyCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + return c.RekeyRecoveryKeyCancelWithContext(context.Background()) +} + +func (c 
*Sys) RekeyRecoveryKeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + return c.RekeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + return c.RekeyRecoveryKeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyRecoveryKeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + 
var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveRecoveryBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + return c.RekeyDeleteBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyDeleteRecoveryBackup() error { + return c.RekeyDeleteRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteRecoveryBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyRecoveryKeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + 
+ var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyRetrieveResponse struct { + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go new file mode 100644 index 00000000000..fa86886c35b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -0,0 +1,102 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" +) + +func (c *Sys) Rotate() error { + return c.RotateWithContext(context.Background()) +} + +func (c *Sys) RotateWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/rotate") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + return c.KeyStatusWithContext(context.Background()) +} + +func (c *Sys) KeyStatusWithContext(ctx context.Context) (*KeyStatus, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/key-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") 
+ } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + encryptionsRaw, ok := secret.Data["encryptions"] + if ok { + encryptions, ok := encryptionsRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert encryptions to a number") + } + encryptions64, err := encryptions.Int64() + if err != nil { + return nil, err + } + result.Encryptions = int(encryptions64) + } + + return &result, err +} + +type KeyStatus struct { + Term int `json:"term"` + InstallTime time.Time `json:"install_time"` + Encryptions int `json:"encryptions"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go new file mode 100644 index 00000000000..0522f2a42b7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -0,0 +1,119 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + return c.SealStatusWithContext(context.Background()) +} + +func (c *Sys) SealStatusWithContext(ctx context.Context) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/seal-status") + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Seal() error { + return c.SealWithContext(context.Background()) +} + +func (c *Sys) SealWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/seal") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + return c.ResetUnsealProcessWithContext(context.Background()) +} + +func (c *Sys) ResetUnsealProcessWithContext(ctx context.Context) (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + return c.UnsealWithContext(context.Background(), shard) +} + +func (c *Sys) UnsealWithContext(ctx context.Context, shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) { + return c.UnsealWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) UnsealWithOptionsWithContext(ctx context.Context, opts *UnsealOpts) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*SealStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + 
return &result, err +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` + HCPLinkStatus string `json:"hcp_link_status,omitempty"` + HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +type UnsealOpts struct { + Key string `json:"key"` + Reset bool `json:"reset"` + Migrate bool `json:"migrate"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go new file mode 100644 index 00000000000..833f31a6f76 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -0,0 +1,23 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) StepDown() error { + return c.StepDownWithContext(context.Background()) +} + +func (c *Sys) StepDownWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/step-down") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md index 73c0c462de0..1d80c42a530 100644 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -325,35 +325,35 @@ The content compressed in this mode is fully compatible with the standard decode Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): -| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | -|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| -| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% | -| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - | -| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% | -| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - | -| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% | -| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - | -| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% | -| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - | -| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% | -| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - | -| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 
8477 MB/s | 30.48% | -| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - | -| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% | -| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - | -| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% | -| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - | -| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% | -| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - | -| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% | -| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - | -| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% | -| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - | +| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | +|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | +| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | +| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | +| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | +| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | +| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | +| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | +| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | +| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | +| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | +| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | +| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% | +| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - | +| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% | +| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - | +| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% | +| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - | ### Legend -* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. -* `S2 throughput`: Throughput of S2 in MB/s. 
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. +* `S2 Throughput`: Throughput of S2 in MB/s. * `S2 % smaller`: How many percent of the Snappy output size is S2 better. * `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. * `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. @@ -361,7 +361,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads. -Machine generated data gets by far the biggest compression boost, with size being being reduced by up to 45% of Snappy size. +Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size. The "better" compression mode sees a good improvement in all cases, but usually at a performance cost. @@ -404,15 +404,15 @@ The "better" compression mode will actively look for shorter matches, which is w Without assembly decompression is also very fast; single goroutine decompression speed. No assembly: | File | S2 Throughput | S2 throughput | -|--------------------------------|--------------|---------------| -| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | -| 10gb.tar.s2 | 1.30x | 867.07 MB/s | -| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | -| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | -| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | -| enwik9.s2 | 1.67x | 681.53 MB/s | -| adresser.json.s2 | 3.41x | 4230.53 MB/s | -| silesia.tar.s2 | 1.52x | 811.58 | +|--------------------------------|---------------|---------------| +| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | +| 10gb.tar.s2 | 1.30x | 867.07 MB/s | +| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | +| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | +| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | +| enwik9.s2 | 1.67x | 681.53 MB/s | +| adresser.json.s2 | 3.41x | 4230.53 MB/s | +| silesia.tar.s2 | 1.52x | 811.58 | Even though S2 typically compresses better than Snappy, decompression speed is always better. @@ -450,14 +450,14 @@ The most reliable is a wide dataset. For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), 53927 files, total input size: 4,014,735,833 bytes. Single goroutine used. -| * | Input | Output | Reduction | MB/s | -|-------------------|------------|------------|-----------|--------| -| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** | -| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 | -| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 | -| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 | -| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 | -| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 | +| * | Input | Output | Reduction | MB/s | +|-------------------|------------|------------|------------|------------| +| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** | +| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 | +| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 | +| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 | +| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 | +| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 | S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best". 
"Better" mode provides the same compression speed as LZ4 with better compression ratio. @@ -489,42 +489,23 @@ AMD64 assembly is use for both S2 and Snappy. | Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec | |-----------------------|-------------|---------|--------------|-------------|-------------|-------------| -| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s | -| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s | -| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s | -| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s | -| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s | -| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s | -| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s | -| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s | -| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s | -| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s | -| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s | -| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s | -| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s | -| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s | -| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s | - - -| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed | -|-----------------------|-------------|------------------|----------|--------------| -| html | 22.31% | 7.58% | 1.07x | 1.20x | -| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x | -| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x | -| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x | -| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x | -| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x | -| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x | -| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x | -| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x | -| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x | -| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x | -| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x | -| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x | -| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x | -| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x | -| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x | +| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s | +| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s | +| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s | +| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s | +| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s | +| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s | +| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s | +| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s | +| plrabn12.txt | 319267 | 318196 | 
5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s | +| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s | +| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s | +| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s | +| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s | +| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s | +| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s | + Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size. @@ -543,42 +524,23 @@ So individual benchmarks should only be seen as a guideline and the overall pict | Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | |-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| -| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s | -| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s | -| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s | -| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s | -| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s | -| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s | -| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s | -| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s | -| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s | -| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s | -| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s | -| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s | -| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s | -| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s | -| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s | - - -| Relative Perf | Snappy size | Better size | Better Speed | Better dec | -|-----------------------|-------------|-------------|--------------|------------| -| html | 22.31% | 13.18% | 0.48x | 0.98x | -| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x | -| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x | -| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x | -| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x | -| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x | -| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x | -| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x | -| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x | -| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x | -| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x | -| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x | -| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x | -| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x | -| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x | -| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x | +| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s | +| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s | +| 
fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s | +| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s | +| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s | +| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s | +| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s | +| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s | +| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s | +| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s | +| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s | +| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s | +| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s | +| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s | +| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s | +| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s | + Except for the mostly incompressible JPEG image compression is better and usually in the double digits in terms of percentage reduction over Snappy. @@ -605,29 +567,29 @@ Some examples compared on 16 core CPU, amd64 assembly used: ``` * enwik10 -Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s -Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s -Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s +Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s +Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s +Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s * github-june-2days-2019.json -Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s -Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s -Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s +Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s +Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s +Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s * nyc-taxi-data-10M.csv -Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s -Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s -Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s +Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s +Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s +Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s * 10gb.tar -Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s -Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s -Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/ +Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s +Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s +Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/ * consensus.db.10gb -Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s -Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s -Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s +Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s +Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s +Best... 
10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s ``` Decompression speed should be around the same as using the 'better' compression mode. @@ -648,10 +610,10 @@ If you would like more control, you can use the s2 package as described below: Snappy compatible blocks can be generated with the S2 encoder. Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace -| Snappy | S2 replacement | -|----------------------------|-------------------------| -| snappy.Encode(...) | s2.EncodeSnappy(...) | -| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | +| Snappy | S2 replacement | +|---------------------------|-----------------------| +| snappy.Encode(...) | s2.EncodeSnappy(...) | +| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | `s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output. @@ -660,12 +622,12 @@ Compression and speed is typically a bit better `MaxEncodedLen` is also smaller Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), 53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used: -| Encoder | Size | MB/s | Reduction | -|-----------------------|------------|------------|------------ -| snappy.Encode | 1128706759 | 725.59 | 71.89% | -| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | -| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | -| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%**| +| Encoder | Size | MB/s | Reduction | +|-----------------------|------------|------------|------------| +| snappy.Encode | 1128706759 | 725.59 | 71.89% | +| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | +| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | +| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** | ## Streams @@ -835,6 +797,13 @@ This is done using the regular "Skip" function: This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset. +# Compact storage + +For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from +a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load). + +This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors. + ## Index Format: Each block is structured as a snappy skippable block, with the chunk ID 0x99. @@ -844,20 +813,20 @@ The block can be read from the front, but contains information so it can be read Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding), with un-encoded value length of 64 bits, unless other limits are specified. -| Content | Format | -|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| -| ID, `[1]byte` | Always 0x99. 
| -| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | -| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | -| UncompressedSize, Varint | Total Uncompressed size. | -| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | -| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | -| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | -| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | -| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | -| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | -| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | -| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | +| Content | Format | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| +| ID, `[1]byte` | Always 0x99. | +| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | +| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | +| UncompressedSize, Varint | Total Uncompressed size. | +| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | +| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | +| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | +| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | +| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | +| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | +| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | +| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | For regular streams the uncompressed offsets are fully predictable, so `HasUncompressedOffsets` allows to specify that compressed blocks all have @@ -929,6 +898,7 @@ To decode from any given uncompressed offset `(wantOffset)`: See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface. + # Format Extensions * Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. @@ -951,10 +921,11 @@ The length is specified by reading the 3-bit length specified in the tag and dec | 7 | 65540 + read 3 bytes | This allows any repeat offset + length to be represented by 2 to 5 bytes. +It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies. Lengths are stored as little endian values. -The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams. +The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams. 
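As a rough, hedged illustration of what the repeat-offset extension means in practice (a sketch that only assumes the exported block helpers `s2.Encode`, `s2.EncodeSnappy` and `s2.Decode` behave as described above): the S2 encoders may emit repeat offsets and therefore need an S2-aware decoder, while the `EncodeSnappy*` variants from the compatibility table above stay within the plain Snappy block format, so their output also remains readable by stock Snappy decoders.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Highly repetitive input, where repeat offsets pay off the most.
	src := bytes.Repeat([]byte("an easily repeated payload "), 1000)

	// S2 block format: may contain repeat offsets, so it requires an
	// S2-aware decoder such as s2.Decode.
	s2Block := s2.Encode(nil, src)

	// Snappy-compatible block: no S2 extensions, so any Snappy decoder
	// (or s2.Decode, which reads both formats) can handle it.
	snapBlock := s2.EncodeSnappy(nil, src)

	fmt.Printf("s2 block: %d bytes, snappy-compatible block: %d bytes\n",
		len(s2Block), len(snapBlock))

	if out, err := s2.Decode(nil, snapBlock); err != nil || !bytes.Equal(out, src) {
		fmt.Println("round trip failed:", err)
	}
}
```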
Default streaming block size is 1MB. diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go index 27c0f3c2c45..00c5cc72c24 100644 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -952,7 +952,11 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { // Seek allows seeking in compressed data. func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { if r.err != nil { - return 0, r.err + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil } if offset == 0 && whence == io.SeekCurrent { return r.blockStart + int64(r.i), nil diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go index 1074ebd215e..11300c3a810 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -28,6 +28,9 @@ func s2Decode(dst, src []byte) int { // As long as we can read at least 5 bytes... for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 switch src[s] & 0x03 { case tagLiteral: x := uint32(src[s] >> 2) @@ -38,14 +41,19 @@ func s2Decode(dst, src []byte) int { s += 2 x = uint32(src[s-1]) case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 s += 3 - x = uint32(src[s-2]) | uint32(src[s-1])<<8 case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 s += 4 - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 s += 5 - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 } length = int(x) + 1 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { @@ -62,8 +70,8 @@ func s2Decode(dst, src []byte) int { case tagCopy1: s += 2 - length = int(src[s-2]) >> 2 & 0x7 toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 if toffset == 0 { if debug { fmt.Print("(repeat) ") @@ -71,14 +79,16 @@ func s2Decode(dst, src []byte) int { // keep last offset switch length { case 5: + length = int(src[s]) + 4 s += 1 - length = int(uint32(src[s-1])) + 4 case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) s += 2 - length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) s += 3 - length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) default: // 0-> 4 } } else { @@ -86,14 +96,16 @@ func s2Decode(dst, src []byte) int { } length += 4 case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 s += 3 - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 s += 5 - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) } if offset <= 0 || d < offset || length > len(dst)-d { diff --git 
a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go index 8b16c38a68f..54c71d3b5d9 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -58,8 +58,9 @@ func encodeGo(dst, src []byte) []byte { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockGo(dst, src []byte) (d int) { // Initialize the hash table. const ( diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index e612225f4d3..6b93daa5ae6 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -8,8 +8,9 @@ package s2 // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -43,8 +44,9 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -78,8 +80,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -112,8 +115,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go index 4bc80bc6a70..1b7ea394fae 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_best.go +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -15,8 +15,9 @@ import ( // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBest(dst, src []byte) (d int) { // Initialize the hash tables. const ( @@ -176,14 +177,21 @@ func encodeBlockBest(dst, src []byte) (d int) { best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) } // Search for a match at best match end, see if that is better. - if sAt := best.s + best.length; sAt < sLimit { - sBack := best.s - backL := best.length + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. 
+ // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning // Load initial values cv = load64(src, sBack) - // Search for mismatch + + // Grab candidates... next := lTable[hash8(load64(src, sAt), lTableBits)] - //next := sTable[hash4(load64(src, sAt), sTableBits)] if checkAt := getCur(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) @@ -191,6 +199,16 @@ func encodeBlockBest(dst, src []byte) (d int) { if checkAt := getPrev(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } } } } @@ -288,8 +306,9 @@ emitRemainder: // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBestSnappy(dst, src []byte) (d int) { // Initialize the hash tables. const ( @@ -546,6 +565,7 @@ emitRemainder: // emitCopySize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopySize(offset, length int) int { @@ -584,6 +604,7 @@ func emitCopySize(offset, length int) int { // emitCopyNoRepeatSize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopyNoRepeatSize(offset, length int) int { diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go index 943215b8ae8..3b66ba42bf3 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -42,8 +42,9 @@ func hash8(u uint64, h uint8) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -56,7 +57,7 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Initialize the hash tables. const ( // Long hash matches. - lTableBits = 16 + lTableBits = 17 maxLTableSize = 1 << lTableBits // Short hash matches. @@ -97,9 +98,26 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { lTable[hashL] = uint32(s) sTable[hashS] = uint32(s) + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + // Check repeat at offset checkRep. 
const checkRep = 1 - if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { base := s + checkRep // Extend back for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { @@ -109,8 +127,8 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { d += emitLiteral(dst[d:], src[nextEmit:base]) // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep for s < len(src) { if len(src)-s < 8 { if src[s] == src[candidate] { @@ -127,28 +145,40 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { s += 8 candidate += 8 } - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) nextEmit = s if s >= sLimit { goto emitRemainder } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } cv = load64(src, s) continue } - if uint32(cv) == load32(src, candidateL) { + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { break } // Check our short candidate - if uint32(cv) == load32(src, candidateS) { + if uint32(cv) == uint32(valShort) { // Try a long candidate at s+1 hashL = hash7(cv>>8, lTableBits) candidateL = int(lTable[hashL]) @@ -227,21 +257,29 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) - cv = load64(src, s) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) - lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: @@ -260,8 +298,9 @@ emitRemainder: // been written. 
// // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -402,21 +441,29 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) - cv = load64(src, s) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) - lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go index 94784b82ad6..db08fc355e1 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -12,6 +12,7 @@ import ( // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlock(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -25,6 +26,7 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetter(dst, src []byte) (d int) { return encodeBlockBetterGo(dst, src) @@ -35,6 +37,7 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetterSnappy(dst, src []byte) (d int) { return encodeBlockBetterSnappyGo(dst, src) @@ -45,6 +48,7 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockSnappy(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -56,6 +60,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 0 <= len(lit) && len(lit) <= math.MaxUint32 func emitLiteral(dst, lit []byte) int { @@ -146,6 +151,7 @@ func emitRepeat(dst []byte, offset, length int) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -214,6 +220,7 @@ func emitCopy(dst []byte, offset, length int) int { // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
// // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -273,8 +280,8 @@ func emitCopyNoRepeat(dst []byte, offset, length int) int { // matchLen returns how many bytes match in a and b // // It assumes that: -// len(a) <= len(b) // +// len(a) <= len(b) func matchLen(a []byte, b []byte) int { b = b[:len(a)] var checked int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go index 88f27c09900..7e00bac3eae 100644 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -1,7 +1,6 @@ // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. //go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm package s2 @@ -150,8 +149,9 @@ func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes with margin of 0 bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 // //go:noescape func emitLiteral(dst []byte, lit []byte) int @@ -165,9 +165,10 @@ func emitRepeat(dst []byte, offset int, length int) int // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 // //go:noescape func emitCopy(dst []byte, offset int, length int) int @@ -175,9 +176,10 @@ func emitCopy(dst []byte, offset int, length int) int // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 // //go:noescape func emitCopyNoRepeat(dst []byte, offset int, length int) int @@ -185,7 +187,8 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int // matchLen returns how many bytes match in a and b // // It assumes that: -// len(a) <= len(b) +// +// len(a) <= len(b) // //go:noescape func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s index 36915d9495c..81a487d6def 100644 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -1,7 +1,6 @@ // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. 
//go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm #include "textflag.h" @@ -5743,9 +5742,9 @@ emit_literal_done_emit_remainder_encodeBlockAsm8B: // func encodeBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm(SB), $327704-56 +TEXT ·encodeBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -5797,27 +5796,37 @@ check_maxskip_cont_encodeBetterBlockAsm: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm candidateS_match_encodeBetterBlockAsm: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -6590,52 +6599,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm: match_nolit_dst_ok_encodeBetterBlockAsm: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm emit_remainder_encodeBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -6815,9 +6821,9 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm: // func encodeBetterBlockAsm4MB(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm4MB(SB), 
$327704-56 +TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -6869,27 +6875,37 @@ check_maxskip_cont_encodeBetterBlockAsm4MB: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm4MB - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm4MB - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm4MB + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB candidateS_match_encodeBetterBlockAsm4MB: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -7600,52 +7616,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: match_nolit_dst_ok_encodeBetterBlockAsm4MB: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm4MB + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm4MB emit_remainder_encodeBetterBlockAsm4MB: MOVQ src_len+32(FP), CX @@ -7871,12 +7884,22 @@ search_loop_encodeBetterBlockAsm12B: MOVL 65560(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm12B + CMPQ R11, DI + JNE 
no_short_found_encodeBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B candidateS_match_encodeBetterBlockAsm12B: SHRQ $0x08, DI @@ -8447,52 +8470,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm12B: match_nolit_dst_ok_encodeBetterBlockAsm12B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x34, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x34, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 65560(SP)(R11*4) + MOVL R14, 65560(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm12B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x32, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm12B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm12B emit_remainder_encodeBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -8707,12 +8727,22 @@ search_loop_encodeBetterBlockAsm10B: MOVL 16408(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B candidateS_match_encodeBetterBlockAsm10B: SHRQ $0x08, DI @@ -9283,52 +9313,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm10B: match_nolit_dst_ok_encodeBetterBlockAsm10B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 SHLQ $0x20, R11 IMULQ R8, R11 
SHRQ $0x36, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x36, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 16408(SP)(R11*4) + MOVL R14, 16408(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm10B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x34, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm10B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm10B emit_remainder_encodeBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -9543,12 +9570,22 @@ search_loop_encodeBetterBlockAsm8B: MOVL 4120(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B candidateS_match_encodeBetterBlockAsm8B: SHRQ $0x08, DI @@ -10105,52 +10142,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm8B: match_nolit_dst_ok_encodeBetterBlockAsm8B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x38, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x38, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 4120(SP)(R11*4) + MOVL R14, 4120(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm8B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x36, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm8B + 
ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm8B emit_remainder_encodeBetterBlockAsm8B: MOVQ src_len+32(FP), CX @@ -14287,9 +14321,9 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: // func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56 +TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -14341,27 +14375,37 @@ check_maxskip_cont_encodeSnappyBetterBlockAsm: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm candidateS_match_encodeSnappyBetterBlockAsm: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -14685,52 +14729,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: match_nolit_dst_ok_encodeSnappyBetterBlockAsm: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm emit_remainder_encodeSnappyBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -14964,12 +15005,22 @@ 
search_loop_encodeSnappyBetterBlockAsm64K: MOVL 262168(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm64K - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm64K - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm64K + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K candidateS_match_encodeSnappyBetterBlockAsm64K: SHRQ $0x08, DI @@ -15248,52 +15299,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x30, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 262168(SP)(R11*4) + MOVL R14, 262168(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x30, R8 SHLQ $0x08, R10 IMULQ SI, R10 SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm64K + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm64K emit_remainder_encodeSnappyBetterBlockAsm64K: MOVQ src_len+32(FP), CX @@ -15508,12 +15556,22 @@ search_loop_encodeSnappyBetterBlockAsm12B: MOVL 65560(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm12B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B candidateS_match_encodeSnappyBetterBlockAsm12B: SHRQ $0x08, DI @@ -15792,52 +15850,49 @@ 
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x34, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x34, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 65560(SP)(R11*4) + MOVL R14, 65560(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x32, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm12B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm12B emit_remainder_encodeSnappyBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -16052,12 +16107,22 @@ search_loop_encodeSnappyBetterBlockAsm10B: MOVL 16408(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B candidateS_match_encodeSnappyBetterBlockAsm10B: SHRQ $0x08, DI @@ -16336,52 +16401,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x36, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x36, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 
16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 16408(SP)(R11*4) + MOVL R14, 16408(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x34, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm10B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm10B emit_remainder_encodeSnappyBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -16596,12 +16658,22 @@ search_loop_encodeSnappyBetterBlockAsm8B: MOVL 4120(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B candidateS_match_encodeSnappyBetterBlockAsm8B: SHRQ $0x08, DI @@ -16878,52 +16950,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x38, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x38, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 4120(SP)(R11*4) + MOVL R14, 4120(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x36, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm8B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm8B emit_remainder_encodeSnappyBetterBlockAsm8B: MOVQ src_len+32(FP), 
CX diff --git a/vendor/github.com/opentracing/opentracing-go/.golangci.yml b/vendor/github.com/opentracing/opentracing-go/.golangci.yml new file mode 100644 index 00000000000..011a79406f4 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.golangci.yml @@ -0,0 +1,3 @@ +linters-settings: + staticcheck: + checks: [ "all", "-SA1019" ] diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml deleted file mode 100644 index b950e42965f..00000000000 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -matrix: - include: - - go: "1.13.x" - - go: "1.14.x" - - go: "tip" - env: - - LINT=true - - COVERAGE=true - -install: - - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi - - go get -u github.com/stretchr/testify/... - -script: - - make test - - go build ./... - - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi - - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go index 8c7932ce65b..6e964cf2f02 100644 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go @@ -178,7 +178,7 @@ func (s *MockSpan) Finish() { s.Lock() s.FinishTime = time.Now() s.Unlock() - s.tracer.recordSpan(s) + s.tracer.recordFinishedSpan(s) } // FinishWithOptions belongs to the Span interface @@ -205,7 +205,7 @@ func (s *MockSpan) FinishWithOptions(opts opentracing.FinishOptions) { } } - s.tracer.recordSpan(s) + s.tracer.recordFinishedSpan(s) } // String allows printing span for debugging diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go index 4533da7b1f7..02bd1658c71 100644 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go @@ -11,6 +11,7 @@ import ( func New() *MockTracer { t := &MockTracer{ finishedSpans: []*MockSpan{}, + startedSpans: []*MockSpan{}, injectors: make(map[interface{}]Injector), extractors: make(map[interface{}]Extractor), } @@ -34,10 +35,22 @@ func New() *MockTracer { type MockTracer struct { sync.RWMutex finishedSpans []*MockSpan + startedSpans []*MockSpan injectors map[interface{}]Injector extractors map[interface{}]Extractor } +// UnfinishedSpans returns all spans that have been started and not finished since the +// MockTracer was constructed or since the last call to its Reset() method. +func (t *MockTracer) UnfinishedSpans() []*MockSpan { + t.RLock() + defer t.RUnlock() + + spans := make([]*MockSpan, len(t.startedSpans)) + copy(spans, t.startedSpans) + return spans +} + // FinishedSpans returns all spans that have been Finish()'ed since the // MockTracer was constructed or since the last call to its Reset() method. 
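The vendored mocktracer change above adds started-span bookkeeping (`startedSpans`, `recordStartedSpan`, `recordFinishedSpan`) so callers can observe spans that were started but never finished. As an illustrative aside, not part of the vendored diff, a test could exercise the new `UnfinishedSpans` accessor roughly like this:

```go
package example

import (
	"testing"

	"github.com/opentracing/opentracing-go/mocktracer"
)

// TestSpanLifecycle is a hypothetical sketch: it checks that a span moves
// from UnfinishedSpans to FinishedSpans once Finish is called.
func TestSpanLifecycle(t *testing.T) {
	tracer := mocktracer.New()
	span := tracer.StartSpan("operation")

	if n := len(tracer.UnfinishedSpans()); n != 1 {
		t.Fatalf("expected 1 unfinished span, got %d", n)
	}

	span.Finish()

	if n := len(tracer.UnfinishedSpans()); n != 0 {
		t.Fatalf("expected 0 unfinished spans after Finish, got %d", n)
	}
	if n := len(tracer.FinishedSpans()); n != 1 {
		t.Fatalf("expected 1 finished span, got %d", n)
	}
}
```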
func (t *MockTracer) FinishedSpans() []*MockSpan { @@ -54,6 +67,7 @@ func (t *MockTracer) FinishedSpans() []*MockSpan { func (t *MockTracer) Reset() { t.Lock() defer t.Unlock() + t.startedSpans = []*MockSpan{} t.finishedSpans = []*MockSpan{} } @@ -63,7 +77,10 @@ func (t *MockTracer) StartSpan(operationName string, opts ...opentracing.StartSp for _, o := range opts { o.Apply(&sso) } - return newMockSpan(t, operationName, sso) + + span := newMockSpan(t, operationName, sso) + t.recordStartedSpan(span) + return span } // RegisterInjector registers injector for given format @@ -98,8 +115,22 @@ func (t *MockTracer) Extract(format interface{}, carrier interface{}) (opentraci return extractor.Extract(carrier) } -func (t *MockTracer) recordSpan(span *MockSpan) { +func (t *MockTracer) recordStartedSpan(span *MockSpan) { + t.Lock() + defer t.Unlock() + t.startedSpans = append(t.startedSpans, span) +} + +func (t *MockTracer) recordFinishedSpan(span *MockSpan) { t.Lock() defer t.Unlock() t.finishedSpans = append(t.finishedSpans, span) + + for i := range t.startedSpans { + if t.startedSpans[i].SpanContext.SpanID == span.SpanContext.SpanID && + t.startedSpans[i].SpanContext.TraceID == span.SpanContext.TraceID { + t.startedSpans = append(t.startedSpans[:i], t.startedSpans[i+1:]...) + return + } + } } diff --git a/vendor/github.com/ryanuber/go-glob/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml new file mode 100644 index 00000000000..9d1ca3c378e --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - tip +script: + - go test -v ./... diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 00000000000..bdfbd951497 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 00000000000..48f7fcb05a4 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. 
Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. + +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 00000000000..e67db3be183 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. +func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. + return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 00000000000..a7828345fcc --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519†function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seedâ€. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. 
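Because the package above only aliases and forwards to crypto/ed25519, keys and signatures are interchangeable between the wrapper and the standard library. A minimal sketch, illustrative only and not part of the vendored file:

```go
package main

import (
	stded25519 "crypto/ed25519"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Passing a nil reader falls back to crypto/rand.Reader.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("example message")
	sig := ed25519.Sign(priv, msg)

	// PublicKey is a type alias, so the standard library can verify a
	// signature produced through the x/crypto wrapper.
	fmt.Println(stded25519.Verify(pub, msg, sig)) // true
}
```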
+package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000000..904b57e01d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. 
+// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc new file mode 100644 index 00000000000..730e569b069 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc @@ -0,0 +1 @@ +'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore new file mode 100644 index 00000000000..95a851586a5 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.gitignore @@ -0,0 +1,8 @@ +*~ +.*.swp +*.out +*.test +*.pem +*.cov +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml new file mode 100644 index 00000000000..391b99a4014 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml @@ -0,0 +1,45 @@ +language: go + +sudo: false + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: +- '1.14.x' +- '1.15.x' +- tip + +go_import_path: gopkg.in/square/go-jose.v2 + +before_script: +- export PATH=$HOME/.local/bin:$PATH + +before_install: +# Install encrypted gitcookies to get around bandwidth-limits +# that is causing Travis-CI builds to fail. For more info, see +# https://github.com/golang/go/issues/12933 +- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true +- bash .gitcookies.sh || true +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get github.com/stretchr/testify/assert +- go get github.com/stretchr/testify/require +- go get github.com/google/go-cmp/cmp +- go get golang.org/x/tools/cmd/cover || true +- go get code.google.com/p/go.tools/cmd/cover || true +- pip install cram --user + +script: +- go test . 
-v -covermode=count -coverprofile=profile.cov +- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov +- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov +- go test ./json -v # no coverage for forked encoding/json package +- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util +- cd .. + +after_success: +- gocovmerge *.cov */*.cov > merged.coverprofile +- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md new file mode 100644 index 00000000000..3305db0f653 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md new file mode 100644 index 00000000000..61b183651c0 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +[Individual Contributor License Agreement][1]. + + [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md new file mode 100644 index 00000000000..1791bfa8f67 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/README.md @@ -0,0 +1,118 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) +[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose) +[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. 
This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). +Tables of supported algorithms are shown below. The library supports both +the compact and full serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 2](https://gopkg.in/square/go-jose.v2) +([branch](https://github.com/square/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version: + + import "gopkg.in/square/go-jose.v2" + +The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will +still receive backported bug fixes and security fixes, but otherwise +development is frozen. All new feature development takes place on the `v2` +branch. Version 2 also contains additional sub-packages such as the +[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation +contributed by [@shaxbee](https://github.com/shaxbee). + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + Ed25519 | EdDSA2 + +2. 
Only available in version 2 of the package + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) + AES, HMAC | []byte + +1. Only available in version 2 of the package + +## Examples + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example. diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go new file mode 100644 index 00000000000..b69aa0369c0 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go @@ -0,0 +1,592 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + "golang.org/x/crypto/ed25519" + josecipher "gopkg.in/square/go-jose.v2/cipher" + "gopkg.in/square/go-jose.v2/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. 
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. 
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("square/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("square/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("square/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("square/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("square/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("square/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("square/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("square/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go new file mode 100644 index 00000000000..f6465c041f7 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("square/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, []byte(ciphertext[:offset])...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("square/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("square/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("square/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go new file mode 100644 index 00000000000..f62c3bdba5d --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go new file mode 100644 index 00000000000..093c646740b --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. + octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go new file mode 100644 index 00000000000..1d36d501510 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("square/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go new file mode 100644 index 00000000000..be7433e28f6 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go @@ -0,0 +1,542 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "gopkg.in/square/go-jose.v2/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if rcpts == nil || len(rcpts) == 0 { + return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + 
} + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil || err != nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go new file mode 100644 index 00000000000..dd1387f3f06 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON +Web Token support available in a sub-package. The library supports both the +compact and full serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go new file mode 100644 index 00000000000..70f7385c419 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go @@ -0,0 +1,185 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "gopkg.in/square/go-jose.v2/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/square/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64.RawURLEncoding.DecodeString(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md new file mode 100644 index 00000000000..86de5e5581f --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go new file mode 100644 index 00000000000..4dbc4146cf9 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. 
+// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. 
+func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
+func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. 
+func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
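// Illustrative sketch (not part of the vendored file): unlike the standard
// library, objectInterface above rejects duplicate object keys, so decoding
// into an interface{} surfaces an error instead of silently keeping the last
// value. Assuming the package is imported via its vendored path, the
// observable behaviour looks like this:

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	var v interface{}
	// The repeated "a" key trips the duplicate-key check in objectInterface.
	err := json.Unmarshal([]byte(`{"a": 1, "a": 2}`), &v)
	fmt.Println(err) // json: duplicate key 'a' in object
}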
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go new file mode 100644 index 00000000000..1dae8bb7cd8 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. 
Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. 
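// Illustrative sketch (not part of the vendored file): a worked example of the
// struct-tag rules documented above, assuming the package is imported via its
// vendored path. The event type and its fields are invented for the example.

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

type event struct {
	Name   string `json:"name"`            // renamed key
	Count  int    `json:"count,omitempty"`  // dropped when zero
	Secret string `json:"-"`               // never encoded
	ID     int64  `json:"id,string"`        // encoded inside a JSON string
}

func main() {
	b, err := json.Marshal(event{Name: "up", Secret: "hidden", ID: 7})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"up","id":"7"}
}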
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML