diff --git a/README.md b/README.md index 1fd24af4..42502ef9 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,8 @@ - [API Deprecation History](#api-deprecation-history) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) - - [Full test environment](#full-test-environment) + - [Full test environment - Configure rate limits through files](#full-test-environment---configure-rate-limits-through-files) + - [Full test environment - Configure rate limits through an xDS Management Server](#full-test-environment---configure-rate-limits-through-an-xds-management-server) - [Self-contained end-to-end integration test](#self-contained-end-to-end-integration-test) - [Configuration](#configuration) - [The configuration format](#the-configuration-format) @@ -25,6 +26,8 @@ - [Example 6](#example-6) - [Example 7](#example-7) - [Loading Configuration](#loading-configuration) + - [File Based Configuration Loading](#file-based-configuration-loading) + - [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) - [Log Format](#log-format) - [GRPC Keepalive](#grpc-keepalive) - [Request Fields](#request-fields) @@ -129,11 +132,12 @@ If you want to run with [two redis instances](#two-redis-instances), you will ne the docker-compose.yml file to run a second redis container, and change the environment variables as explained in the [two redis instances](#two-redis-instances) section. 
-## Full test environment +## Full test environment - Configure rate limits through files To run a fully configured environment to demo Envoy based rate limiting, run: ```bash +export CONFIG_TYPE=FILE docker-compose -f docker-compose-example.yml up --build --remove-orphans ``` @@ -160,6 +164,36 @@ To see the metrics in the example curl http://localhost:9102/metrics | grep -i shadow ``` +## Full test environment - Configure rate limits through an xDS Management Server + +To run a fully configured environment to demo Envoy based rate limiting, run: + +```bash +export CONFIG_TYPE=GRPC_XDS_SOTW +docker-compose -f docker-compose-example.yml --profile xds-config up --build --remove-orphans +``` + +This will run in the `xds-config` docker-compose profile which will run an example xDS-Server, ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints. + +```bash +curl localhost:8888/test +curl localhost:8888/header -H "foo: foo" # Header based +curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers +curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz" # This will be rate limited +curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value +curl localhost:8888/twoheader -H "foo: foo" -H "baz: shady" # This will never be ratelimited since "baz" with value "shady" is in shadow_mode +curl localhost:8888/twoheader -H "foo: foo" -H "baz: not-so-shady" # This is subject to rate-limiting because it's not in shadow_mode +``` + +Edit [`examples/xds-sotw-config-server/resource.go`](examples/xds-sotw-config-server/resource.go) to test different rate limit configs. + +To see the metrics in the example + +```bash +# The metrics for the shadow_mode keys +curl http://localhost:9102/metrics | grep -i shadow +``` + ## Self-contained end-to-end integration test Integration tests are coded as bash-scripts in `integration-test/scripts`. 
@@ -492,6 +526,18 @@ descriptors: ## Loading Configuration +Rate limit service supports the following configuration loading methods. You can define which method to use by configuring environment variable `CONFIG_TYPE`. + +| Config Loading Method | Value for Environment Variable `CONFIG_TYPE` | +| --------------------------------------------------------------------------------- | -------------------------------------------- | +| [File Based Configuration Loading](#file-based-configuration-loading) | `FILE` (Default) | +| [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) | `GRPC_XDS_SOTW` | + +When the environment variable `FORCE_START_WITHOUT_INITIAL_CONFIG` is set to `false`, the Rate limit service will wait for initial rate limit configuration before +starting the server (gRPC, Rest server endpoints). When set to `true` the server will start even without initial configuration. + +### File Based Configuration Loading + The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors a designated path, and watches for symlink swaps to files in the directory tree to reload configuration files. @@ -525,6 +571,31 @@ For more information on how runtime works you can read its [README](https://gith By default it is not possible to define multiple configuration files within `RUNTIME_SUBDIRECTORY` referencing the same domain. To enable this behavior set `MERGE_DOMAIN_CONFIG` to `true`. +### xDS Management Server Based Configuration Loading + +xDS Management Server is a gRPC server which implements the [Aggregated Discovery Service (ADS)](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/ads.proto). 
+The xDS Management server serves [Discovery Response](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/discovery.proto#L69) with [Ratelimit Configuration Resources](api/ratelimit/config/ratelimit/v3/rls_conf.proto) +and with Type URL `"type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig"`. +The xDS client in the Rate limit service configures the Rate limit service with the provided configuration. +For more information on the xDS protocol please refer to the [envoy proxy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol). + +You can refer to [the sample xDS configuration management server](examples/xds-sotw-config-server/README.md). + +The xDS server for listening for configuration can be set via [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go) +package with the following environment variables: + +``` +CONFIG_GRPC_XDS_NODE_ID default:"default" +CONFIG_GRPC_XDS_SERVER_URL default:"localhost:18000" +CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL default:"3s" +``` + +Ratelimit also supports TLS connections; these can be configured using the following environment variables: + +1. `CONFIG_GRPC_XDS_SERVER_USE_TLS`: set to `"true"` to enable a TLS connection with the xDS configuration management server. +2. `CONFIG_GRPC_XDS_CLIENT_TLS_CERT`, `CONFIG_GRPC_XDS_CLIENT_TLS_KEY`, and `CONFIG_GRPC_XDS_SERVER_TLS_CACERT` to provide files to specify a TLS connection configuration to the xDS configuration management server. +3. `CONFIG_GRPC_XDS_SERVER_TLS_SAN`: (Optional) Override the SAN value to validate from the server certificate. + ## Log Format A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules. 
diff --git a/api/ratelimit/config/ratelimit/v3/rls_conf.proto b/api/ratelimit/config/ratelimit/v3/rls_conf.proto new file mode 100644 index 00000000..cdb1836f --- /dev/null +++ b/api/ratelimit/config/ratelimit/v3/rls_conf.proto @@ -0,0 +1,104 @@ +syntax = "proto3"; + +package ratelimit.config.ratelimit.v3; + +option java_package = "io.envoyproxy.ratelimit.config.ratelimit.v3"; +option java_outer_classname = "RlsConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3;ratelimitv3"; + +// [#protodoc-title: Rate limit service configuration] +// A management server which supports ADS (Aggregated Discovery Service - SotW or delta protocol) can apply +// rate limit service configuration using the message type RateLimitConfig. The ADS client within the rate limit service +// will stream Discovery Request with the resource type URL "type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig". +// The ADS management server should respond stream of Discovery Response with the same type URL and array of RateLimitConfigs +// within resources of the Discovery Response. + +// Rate limit configuration for a single domain. +message RateLimitConfig { + // Name of the rate limit configuration. This should be unique for each configuration. + string name = 1; + + // Domain name for the rate limit configuration. + string domain = 2; + + // List of rate limit configuration descriptors. + repeated RateLimitDescriptor descriptors = 3; +} + +// Rate limit configuration descriptor. +message RateLimitDescriptor { + // Key of the descriptor. + string key = 1; + + // Optional value of the descriptor. + string value = 2; + + // Rate limit policy of the descriptor. + RateLimitPolicy rate_limit = 3; + + // List of sub rate limit descriptors. + repeated RateLimitDescriptor descriptors = 4; + + // Mark the descriptor as shadow. When the values is true, rate limit service allow requests to the backend. 
+ bool shadow_mode = 5; +} + +// Rate-limit policy. +message RateLimitPolicy { + // Unit of time for the rate limit. + RateLimitUnit unit = 1; + + // Number of requests allowed in the policy within `unit` time. + uint32 requests_per_unit = 2; + + // Mark the rate limit policy as unlimited. All requests are allowed to the backend. + bool unlimited = 3; + + // Optional name for the rate limit policy. Name the policy, if it should be replaced (dropped evaluation) by + // another policy. + string name = 4; + + // List of rate limit policies, this rate limit policy will replace (drop evaluation) + // For more information: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#replaces + // Example: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#example-7 + repeated RateLimitReplace replaces = 5; +} + +// Replace specifies the rate limit policy that should be replaced (dropped evaluation). +// For more information: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#replaces +message RateLimitReplace { + // Name of the rate limit policy, that is being replaced (dropped evaluation). + string name = 1; +} + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; +} + +// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] + +// Return list of all rate limit configs that rate limit service should be configured with. 
+service RateLimitConfigDiscoveryService { + rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) + returns (stream envoy.service.discovery.v3.DiscoveryResponse) { + } + + rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) + returns (envoy.service.discovery.v3.DiscoveryResponse) { + } +} diff --git a/docker-compose-example.yml b/docker-compose-example.yml index ba4cc69e..242a5011 100644 --- a/docker-compose-example.yml +++ b/docker-compose-example.yml @@ -49,6 +49,22 @@ services: - RUNTIME_ROOT=/data - RUNTIME_SUBDIRECTORY=ratelimit - RUNTIME_WATCH_ROOT=false + - CONFIG_TYPE=${CONFIG_TYPE:-FILE} + - CONFIG_GRPC_XDS_NODE_ID=test-node-id + - CONFIG_GRPC_XDS_SERVER_URL=ratelimit-xds-config-server:18000 + + ratelimit-xds-config-server: + image: ratelimit-xds-config-server:latest + build: + context: examples/xds-sotw-config-server + dockerfile: Dockerfile + command: ["-nodeID", "test-node-id", "-port", "18000", "-debug", "true"] + expose: + - 18000 + networks: + - ratelimit-network + profiles: + - xds-config envoy-proxy: image: envoyproxy/envoy-dev:latest diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile new file mode 100644 index 00000000..52439213 --- /dev/null +++ b/examples/xds-sotw-config-server/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.18 AS build +WORKDIR /xds-server + +COPY . . 
+ +RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/xds-server -v main/main.go + +FROM alpine:3.16 AS final +RUN apk --no-cache add ca-certificates && apk --no-cache update +COPY --from=build /go/bin/xds-server /bin/xds-server +ENTRYPOINT [ "/bin/xds-server" ] diff --git a/examples/xds-sotw-config-server/README.md b/examples/xds-sotw-config-server/README.md new file mode 100644 index 00000000..4586cf14 --- /dev/null +++ b/examples/xds-sotw-config-server/README.md @@ -0,0 +1,17 @@ +# Example Rate-limit Configuration SotW xDS Server + +This is an example of a trivial xDS V3 control plane server similar to the example server in [go-control-plane](https://github.com/envoyproxy/go-control-plane/tree/main/internal/example). It serves sample Rate limit configuration. You can run the example using the project top-level docker-compose-example.yml, e.g.: + +```bash +export CONFIG_TYPE=GRPC_XDS_SOTW +docker-compose -f docker-compose-example.yml --profile xds-config up --build --remove-orphans +``` + +The docker-compose builds and runs the example server along with Rate limit server. The example server serves a configuration defined in [`resource.go`](resource.go). If everything works correctly, you can follow the [examples in project top-level README.md file](../../README.md#examples). + +## Files + +- [main/main.go](main/main.go) is the example program entrypoint. It instantiates the cache and xDS server and runs the xDS server process. +- [resource.go](resource.go) generates a `Snapshot` structure which describes the configuration that the xDS server serves to Envoy. +- [server.go](server.go) runs the xDS control plane server. +- [logger.go](logger.go) is the logger. 
diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod new file mode 100644 index 00000000..7c5ac4cd --- /dev/null +++ b/examples/xds-sotw-config-server/go.mod @@ -0,0 +1,20 @@ +module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server + +go 1.18 + +require ( + github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f + google.golang.org/grpc v1.52.0 +) + +require ( + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + github.com/golang/protobuf v1.5.2 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum new file mode 100644 index 00000000..d82360b9 --- /dev/null +++ b/examples/xds-sotw-config-server/go.sum @@ -0,0 +1,73 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf 
v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/xds-sotw-config-server/logger.go b/examples/xds-sotw-config-server/logger.go new file mode 100644 index 00000000..bbfeadde --- /dev/null +++ b/examples/xds-sotw-config-server/logger.go @@ -0,0 +1,27 @@ +package example + +import ( + "log" +) + +type Logger struct { + Debug bool +} + +func (logger Logger) Debugf(format string, args ...interface{}) { + if logger.Debug { + log.Printf("[DEBUG] "+format+"\n", args...) + } +} + +func (logger Logger) Infof(format string, args ...interface{}) { + log.Printf("[INFO]"+format+"\n", args...) +} + +func (logger Logger) Warnf(format string, args ...interface{}) { + log.Printf("[WARN] "+format+"\n", args...) +} + +func (logger Logger) Errorf(format string, args ...interface{}) { + log.Printf("[ERROR]"+format+"\n", args...) 
+} diff --git a/examples/xds-sotw-config-server/main/main.go b/examples/xds-sotw-config-server/main/main.go new file mode 100644 index 00000000..82da545f --- /dev/null +++ b/examples/xds-sotw-config-server/main/main.go @@ -0,0 +1,54 @@ +package main + +import ( + "context" + "flag" + "os" + + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "github.com/envoyproxy/go-control-plane/pkg/test/v3" + + example "github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server" +) + +var ( + logger example.Logger + port uint + nodeID string +) + +func init() { + logger = example.Logger{} + + flag.BoolVar(&logger.Debug, "debug", false, "Enable xDS server debug logging") + flag.UintVar(&port, "port", 18000, "xDS management server port") + flag.StringVar(&nodeID, "nodeID", "test-node-id", "Node ID") +} + +func main() { + flag.Parse() + + // Create a cache + cache := cache.NewSnapshotCache(false, cache.IDHash{}, logger) + + // Create the snapshot that we'll serve to Envoy + snapshot := example.GenerateSnapshot() + if err := snapshot.Consistent(); err != nil { + logger.Errorf("Snapshot is inconsistent: %+v\n%+v", snapshot, err) + os.Exit(1) + } + logger.Debugf("Will serve snapshot %+v", snapshot) + + // Add the snapshot to the cache + if err := cache.SetSnapshot(context.Background(), nodeID, snapshot); err != nil { + logger.Errorf("Snapshot error %q for %+v", err, snapshot) + os.Exit(1) + } + + // Run the xDS server + ctx := context.Background() + cb := &test.Callbacks{Debug: logger.Debug} + srv := server.NewServer(ctx, cache, cb) + example.RunServer(ctx, srv, port) +} diff --git a/examples/xds-sotw-config-server/resource.go b/examples/xds-sotw-config-server/resource.go new file mode 100644 index 00000000..71df6324 --- /dev/null +++ b/examples/xds-sotw-config-server/resource.go @@ -0,0 +1,170 @@ +package example + +import ( + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + 
"github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +func makeRlsConfig() []types.Resource { + return []types.Resource{ + &rls_config.RateLimitConfig{ + Name: "mongo_cps", + Domain: "mongo_cps", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "database", + Value: "users", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 500, + }, + }, + { + Key: "database", + Value: "default", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 500, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "rl", + Domain: "rl", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "category", + Value: "account", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "bkthomps"}, {Name: "fake_name"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 4, + }, + }, + { + Key: "source_cluster", + Value: "proxy", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "destination_cluster", + Value: "bkthomps", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "bkthomps"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, + { + Key: "destination_cluster", + Value: "mock", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + { + Key: "destination_cluster", + Value: "override", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "banned_limit"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, + { + Key: "destination_cluster", + Value: "fake", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "fake_name", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, 
+ }, + }, + { + Key: "foo", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "bar", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + { + Key: "bar", + Value: "bkthomps", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "bkthomps", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + { + Key: "bar", + Value: "banned", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "banned_limit", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 0, + }, + }, + { + Key: "baz", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 1, + }, + }, + { + Key: "baz", + Value: "not-so-shady", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + { + Key: "baz", + Value: "shady", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + ShadowMode: true, + }, + { + Key: "bay", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + }, + }, + { + Key: "qux", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + }, + }, + } +} + +func GenerateSnapshot() *cache.Snapshot { + snap, _ := cache.NewSnapshot("1", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: makeRlsConfig(), + }, + ) + return snap +} diff --git a/examples/xds-sotw-config-server/server.go b/examples/xds-sotw-config-server/server.go new file mode 100644 index 00000000..0e120b58 --- /dev/null +++ b/examples/xds-sotw-config-server/server.go @@ -0,0 +1,49 @@ +package example + +import ( + "context" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + 
"github.com/envoyproxy/go-control-plane/pkg/server/v3" +) + +const ( + grpcKeepaliveTime = 30 * time.Second + grpcKeepaliveTimeout = 5 * time.Second + grpcKeepaliveMinTime = 30 * time.Second + grpcMaxConcurrentStreams = 1000000 +) + +// RunServer starts an xDS server at the given port. +func RunServer(ctx context.Context, srv server.Server, port uint) { + grpcServer := grpc.NewServer( + grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams), + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: grpcKeepaliveTime, + Timeout: grpcKeepaliveTimeout, + }), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: grpcKeepaliveMinTime, + PermitWithoutStream: true, + }), + ) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + log.Fatal(err) + } + + discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, srv) + + log.Printf("Management server listening on %d\n", port) + if err = grpcServer.Serve(lis); err != nil { + log.Println(err) + } +} diff --git a/go.mod b/go.mod index 6506dba4..3ed66726 100644 --- a/go.mod +++ b/go.mod @@ -6,45 +6,47 @@ require ( github.com/alicebob/miniredis/v2 v2.23.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/coocood/freecache v1.1.0 - github.com/envoyproxy/go-control-plane v0.10.1 + github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.5.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/lyft/goruntime v0.3.0 github.com/lyft/gostats v0.4.1 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.1 golang.org/x/net v0.4.0 - google.golang.org/grpc v1.45.0 + google.golang.org/grpc v1.52.0 + google.golang.org/protobuf v1.28.1 
gopkg.in/yaml.v2 v2.3.0 ) require ( github.com/cenkalti/backoff/v4 v4.1.2 // indirect - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/google/uuid v1.3.0 github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 go.opentelemetry.io/otel v1.7.0 @@ -53,7 +55,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 - 
go.opentelemetry.io/proto/otlp v0.16.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect ) diff --git a/go.sum b/go.sum index 79af9ed7..efe276bd 100644 --- a/go.sum +++ b/go.sum @@ -12,14 +12,16 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -46,6 +48,8 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ github.com/cenkalti/backoff/v4 v4.1.2 
h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -59,9 +63,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ 
-73,21 +77,26 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit 
v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -130,8 +139,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -149,9 +158,12 @@ github.com/googleapis/gax-go/v2 
v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -161,9 +173,12 @@ github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -175,28 +190,35 @@ github.com/lyft/gostats v0.4.1 h1:oR6p4HRCGxt0nUntmZIWmYMgyothBi3eZH2A71vRjsc= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= @@ -228,10 +250,13 @@ go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJ go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E= -go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -303,8 +328,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr 
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -313,6 +338,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -344,7 +370,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -399,15 +424,18 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -429,8 +457,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -452,6 +480,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -461,8 +490,9 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -479,8 +509,9 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -494,8 +525,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -504,8 +536,9 @@ gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/src/config/config.go b/src/config/config.go index 7a22734d..aac8b11b 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -41,8 +41,8 @@ type RateLimitConfig interface { // Information for a config file to load into the aggregate config. type RateLimitConfigToLoad struct { - Name string - FileBytes string + Name string + ConfigYaml *YamlRoot } // Interface for loading a configuration from a list of YAML files. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 806c6411..24a7111e 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -107,8 +107,8 @@ func (this *rateLimitDescriptor) dump() string { // Create a new config error which includes the owning file. // @param config supplies the config file that generated the error. // @param err supplies the error string. 
-func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimitConfigError { - return RateLimitConfigError(fmt.Sprintf("%s: %s", config.Name, err)) +func newRateLimitConfigError(name string, err string) RateLimitConfigError { + return RateLimitConfigError(fmt.Sprintf("%s: %s", name, err)) } // Load a set of config descriptors from the YAML file and check the input. @@ -119,7 +119,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []YamlDescriptor, statsManager stats.Manager) { for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { - panic(newRateLimitConfigError(config, "descriptor has empty key")) + panic(newRateLimitConfigError(config.Name, "descriptor has empty key")) } // Value is optional, so the final key for the map is either the key only or key_value. @@ -131,7 +131,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p newParentKey := parentKey + finalKey if _, present := this.descriptors[finalKey]; present { panic(newRateLimitConfigError( - config, fmt.Sprintf("duplicate descriptor composite key '%s'", newParentKey))) + config.Name, fmt.Sprintf("duplicate descriptor composite key '%s'", newParentKey))) } var rateLimit *RateLimit = nil @@ -146,12 +146,12 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p if unlimited { if validUnit { panic(newRateLimitConfigError( - config, + config.Name, fmt.Sprintf("should not specify rate limit unit when unlimited"))) } } else if !validUnit { panic(newRateLimitConfigError( - config, + config.Name, fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit))) } @@ -171,10 +171,10 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p for _, replaces := range descriptorConfig.RateLimit.Replaces { if replaces.Name == "" { - 
panic(newRateLimitConfigError(config, "should not have an empty replaces entry")) + panic(newRateLimitConfigError(config.Name, "should not have an empty replaces entry")) } if replaces.Name == descriptorConfig.RateLimit.Name { - panic(newRateLimitConfigError(config, "replaces should not contain name of same descriptor")) + panic(newRateLimitConfigError(config.Name, "replaces should not contain name of same descriptor")) } } } @@ -190,17 +190,17 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p // Validate a YAML config file's keys. // @param config specifies the file contents to load. // @param any specifies the yaml file and a map. -func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]interface{}) { +func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) { for k, v := range config_map { if _, ok := k.(string); !ok { errorText := fmt.Sprintf("config error, key is not of type string: %v", k) logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) + panic(newRateLimitConfigError(fileName, errorText)) } if _, ok := validKeys[k.(string)]; !ok { errorText := fmt.Sprintf("config error, unknown key '%s'", k) logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) + panic(newRateLimitConfigError(fileName, errorText)) } switch v := v.(type) { case []interface{}: @@ -208,13 +208,13 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i if _, ok := e.(map[interface{}]interface{}); !ok { errorText := fmt.Sprintf("config error, yaml file contains list of type other than map: %v", e) logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) + panic(newRateLimitConfigError(fileName, errorText)) } element := e.(map[interface{}]interface{}) - validateYamlKeys(config, element) + validateYamlKeys(fileName, element) } case map[interface{}]interface{}: - validateYamlKeys(config, v) + validateYamlKeys(fileName, v) 
// string is a leaf type in ratelimit config. No need to keep validating. case string: // int is a leaf type in ratelimit config. No need to keep validating. @@ -227,40 +227,24 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i default: errorText := fmt.Sprintf("error checking config") logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) + panic(newRateLimitConfigError(fileName, errorText)) } } } -// Load a single YAML config file into the global config. -// @param config specifies the file contents to load. +// Load a single YAML config into the global config. +// @param config specifies the yamlRoot struct to load. func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { - // validate keys in config with generic map - any := map[interface{}]interface{}{} - err := yaml.Unmarshal([]byte(config.FileBytes), &any) - if err != nil { - errorText := fmt.Sprintf("error loading config file: %s", err.Error()) - logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) - } - validateYamlKeys(config, any) - - var root YamlRoot - err = yaml.Unmarshal([]byte(config.FileBytes), &root) - if err != nil { - errorText := fmt.Sprintf("error loading config file: %s", err.Error()) - logger.Debugf(errorText) - panic(newRateLimitConfigError(config, errorText)) - } + root := config.ConfigYaml if root.Domain == "" { - panic(newRateLimitConfigError(config, "config file cannot have empty domain")) + panic(newRateLimitConfigError(config.Name, "config file cannot have empty domain")) } if _, present := this.domains[root.Domain]; present { if !this.mergeDomainConfigs { panic(newRateLimitConfigError( - config, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain))) + config.Name, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain))) } logger.Debugf("patching domain: %s", root.Domain) @@ -357,6 +341,31 @@ func descriptorKey(domain string, descriptor 
*pb_struct.RateLimitDescriptor) str
 return domain + "." + rateLimitKey
 }
 
+// ConfigFileContentToYaml converts a single YAML (string content) into a YamlRoot struct while validating YAML keys.
+// @param fileName specifies the name of the file.
+// @param content specifies the string content of the yaml file.
+func ConfigFileContentToYaml(fileName, content string) *YamlRoot {
+	// validate keys in config with generic map
+	any := map[interface{}]interface{}{}
+	err := yaml.Unmarshal([]byte(content), &any)
+	if err != nil {
+		errorText := fmt.Sprintf("error loading config file: %s", err.Error())
+		logger.Debugf(errorText)
+		panic(newRateLimitConfigError(fileName, errorText))
+	}
+	validateYamlKeys(fileName, any)
+
+	var root YamlRoot
+	err = yaml.Unmarshal([]byte(content), &root)
+	if err != nil {
+		errorText := fmt.Sprintf("error loading config file: %s", err.Error())
+		logger.Debugf(errorText)
+		panic(newRateLimitConfigError(fileName, errorText))
+	}
+
+	return &root
+}
+
 // Create rate limit config from a list of input YAML files.
 // @param configs specifies a list of YAML files to load.
 // @param stats supplies the stats scope to use for limit stats during runtime.
diff --git a/src/config/config_xds.go b/src/config/config_xds.go new file mode 100644 index 00000000..1e772c36 --- /dev/null +++ b/src/config/config_xds.go @@ -0,0 +1,49 @@ +package config + +import ( + rls_conf_v3 "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +// ConfigXdsProtoToYaml converts Xds Proto format to yamlRoot +func ConfigXdsProtoToYaml(xdsProto *rls_conf_v3.RateLimitConfig) *YamlRoot { + return &YamlRoot{ + Domain: xdsProto.Domain, + Descriptors: rateLimitDescriptorsPbToYaml(xdsProto.Descriptors), + } +} + +func rateLimitDescriptorsPbToYaml(pb []*rls_conf_v3.RateLimitDescriptor) []YamlDescriptor { + descriptors := make([]YamlDescriptor, len(pb)) + for i, d := range pb { + descriptors[i] = YamlDescriptor{ + Key: d.Key, + Value: d.Value, + RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), + Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), + ShadowMode: d.ShadowMode, + } + } + + return descriptors +} + +func rateLimitPolicyPbToYaml(pb *rls_conf_v3.RateLimitPolicy) *YamlRateLimit { + if pb == nil { + return nil + } + return &YamlRateLimit{ + RequestsPerUnit: pb.RequestsPerUnit, + Unit: pb.Unit.String(), + Unlimited: pb.Unlimited, + Name: pb.Name, + Replaces: rateLimitReplacesPbToYaml(pb.Replaces), + } +} + +func rateLimitReplacesPbToYaml(pb []*rls_conf_v3.RateLimitReplace) []yamlReplaces { + replaces := make([]yamlReplaces, len(pb)) + for i, r := range pb { + replaces[i] = yamlReplaces{Name: r.Name} + } + return replaces +} diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index 750af791..dc313c31 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -51,7 +51,8 @@ func main() { fmt.Printf("error reading file %s: %s\n", finalPath, err.Error()) os.Exit(1) } - allConfigs = append(allConfigs, config.RateLimitConfigToLoad{finalPath, string(bytes)}) + configYaml := config.ConfigFileContentToYaml(finalPath, string(bytes)) + allConfigs = append(allConfigs, 
config.RateLimitConfigToLoad{Name: finalPath, ConfigYaml: configYaml}) } loadConfigs(allConfigs, *mergeDomainConfigs) diff --git a/src/provider/file_provider.go b/src/provider/file_provider.go new file mode 100644 index 00000000..07bfa11a --- /dev/null +++ b/src/provider/file_provider.go @@ -0,0 +1,118 @@ +package provider + +import ( + "path/filepath" + "strings" + + "github.com/lyft/goruntime/loader" + gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" +) + +type FileProvider struct { + settings settings.Settings + loader config.RateLimitConfigLoader + configUpdateEventChan chan ConfigUpdateEvent + runtime loader.IFace + runtimeUpdateEvent chan int + runtimeWatchRoot bool + rootStore gostats.Store + statsManager stats.Manager +} + +func (p *FileProvider) ConfigUpdateEvent() <-chan ConfigUpdateEvent { + return p.configUpdateEventChan +} + +func (p *FileProvider) Stop() {} + +func (p *FileProvider) watch() { + p.runtime.AddUpdateCallback(p.runtimeUpdateEvent) + + go func() { + p.sendEvent() + // No exit right now. 
+ for { + logger.Debugf("waiting for runtime update") + <-p.runtimeUpdateEvent + logger.Debugf("got runtime update and reloading config") + p.sendEvent() + } + }() +} + +func (p *FileProvider) sendEvent() { + defer func() { + if e := recover(); e != nil { + p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e} + } + }() + + files := []config.RateLimitConfigToLoad{} + snapshot := p.runtime.Snapshot() + for _, key := range snapshot.Keys() { + if p.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { + continue + } + + configYaml := config.ConfigFileContentToYaml(key, snapshot.Get(key)) + files = append(files, config.RateLimitConfigToLoad{Name: key, ConfigYaml: configYaml}) + } + + rlSettings := settings.NewSettings() + newConfig := p.loader.Load(files, p.statsManager, rlSettings.MergeDomainConfigurations) + + p.configUpdateEventChan <- &ConfigUpdateEventImpl{config: newConfig} +} + +func (p *FileProvider) setupRuntime() { + loaderOpts := make([]loader.Option, 0, 1) + if p.settings.RuntimeIgnoreDotFiles { + loaderOpts = append(loaderOpts, loader.IgnoreDotFiles) + } else { + loaderOpts = append(loaderOpts, loader.AllowDotFiles) + } + var err error + if p.settings.RuntimeWatchRoot { + p.runtime, err = loader.New2( + p.settings.RuntimePath, + p.settings.RuntimeSubdirectory, + p.rootStore.ScopeWithTags("runtime", p.settings.ExtraTags), + &loader.SymlinkRefresher{RuntimePath: p.settings.RuntimePath}, + loaderOpts...) + } else { + directoryRefresher := &loader.DirectoryRefresher{} + // Adding loader.Remove to the default set of goruntime's FileSystemOps. + directoryRefresher.WatchFileSystemOps(loader.Remove, loader.Write, loader.Create, loader.Chmod) + + p.runtime, err = loader.New2( + filepath.Join(p.settings.RuntimePath, p.settings.RuntimeSubdirectory), + "config", + p.rootStore.ScopeWithTags("runtime", p.settings.ExtraTags), + directoryRefresher, + loaderOpts...) 
+	}
+
+	if err != nil {
+		panic(err)
+	}
+}
+
+func NewFileProvider(settings settings.Settings, statsManager stats.Manager, rootStore gostats.Store) RateLimitConfigProvider {
+	p := &FileProvider{
+		settings:              settings,
+		loader:                config.NewRateLimitConfigLoaderImpl(),
+		configUpdateEventChan: make(chan ConfigUpdateEvent),
+		runtimeUpdateEvent:    make(chan int),
+		runtimeWatchRoot:      settings.RuntimeWatchRoot,
+		rootStore:             rootStore,
+		statsManager:          statsManager,
+	}
+	p.setupRuntime()
+	go p.watch()
+	return p
+}
diff --git a/src/provider/provider.go b/src/provider/provider.go
new file mode 100644
index 00000000..a4cade4c
--- /dev/null
+++ b/src/provider/provider.go
@@ -0,0 +1,29 @@
+package provider
+
+import (
+	"github.com/envoyproxy/ratelimit/src/config"
+)
+
+// RateLimitConfigProvider is the interface for configuration providers.
+type RateLimitConfigProvider interface {
+	// ConfigUpdateEvent returns a receive-only channel for retrieving configuration updates.
+	// The provider implementation should send a config update to this channel when it detects a config update.
+	// The config receiver waits on this channel for configuration updates.
+	ConfigUpdateEvent() <-chan ConfigUpdateEvent
+
+	// Stop stops the configuration provider's watch for configurations.
+ Stop() +} + +type ConfigUpdateEvent interface { + GetConfig() (config config.RateLimitConfig, err any) +} + +type ConfigUpdateEventImpl struct { + config config.RateLimitConfig + err any +} + +func (e *ConfigUpdateEventImpl) GetConfig() (config config.RateLimitConfig, err any) { + return e.config, e.err +} diff --git a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go new file mode 100644 index 00000000..3b891979 --- /dev/null +++ b/src/provider/xds_grpc_sotw_provider.go @@ -0,0 +1,182 @@ +package provider + +import ( + "context" + "fmt" + "strings" + + corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "github.com/golang/protobuf/ptypes/any" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + logger "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + + "github.com/envoyproxy/go-control-plane/pkg/client/sotw/v3" + rls_conf_v3 "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +// XdsGrpcSotwProvider is the xDS provider which implements `RateLimitConfigProvider` interface. +type XdsGrpcSotwProvider struct { + settings settings.Settings + loader config.RateLimitConfigLoader + configUpdateEventChan chan ConfigUpdateEvent + statsManager stats.Manager + ctx context.Context + adsClient sotw.ADSClient + // connectionRetryChannel is the channel which trigger true for connection issues + connectionRetryChannel chan bool +} + +// NewXdsGrpcSotwProvider initializes xDS listener and returns the xDS provider. 
+func NewXdsGrpcSotwProvider(settings settings.Settings, statsManager stats.Manager) RateLimitConfigProvider { + ctx := context.Background() + p := &XdsGrpcSotwProvider{ + settings: settings, + statsManager: statsManager, + ctx: ctx, + configUpdateEventChan: make(chan ConfigUpdateEvent), + connectionRetryChannel: make(chan bool), + loader: config.NewRateLimitConfigLoaderImpl(), + adsClient: sotw.NewADSClient(ctx, getClientNode(settings), resource.RateLimitConfigType), + } + go p.initXdsClient() + return p +} + +// ConfigUpdateEvent returns config provider channel +func (p *XdsGrpcSotwProvider) ConfigUpdateEvent() <-chan ConfigUpdateEvent { + return p.configUpdateEventChan +} + +func (p *XdsGrpcSotwProvider) Stop() { + p.connectionRetryChannel <- false +} + +func (p *XdsGrpcSotwProvider) initXdsClient() { + logger.Info("Starting xDS client connection for rate limit configurations") + conn := p.initializeAndWatch() + + for retryEvent := range p.connectionRetryChannel { + if conn != nil { + conn.Close() + } + if !retryEvent { // stop watching + logger.Info("Stopping xDS client watch for rate limit configurations") + break + } + conn = p.initializeAndWatch() + } +} + +func (p *XdsGrpcSotwProvider) initializeAndWatch() *grpc.ClientConn { + conn, err := p.getGrpcConnection() + if err != nil { + logger.Errorf("Error initializing gRPC connection to xDS Management Server: %s", err.Error()) + p.retryGrpcConn() + return nil + } + + logger.Info("Connection to xDS Management Server is successful") + p.adsClient.InitConnect(conn) + go p.watchConfigs() + return conn +} + +func (p *XdsGrpcSotwProvider) watchConfigs() { + for { + resp, err := p.adsClient.Fetch() + if err != nil { + logger.Errorf("Failed to receive configuration from xDS Management Server: %s", err.Error()) + if sotw.IsConnError(err) { + p.retryGrpcConn() + return + } + p.adsClient.Nack(err.Error()) + } else { + logger.Tracef("Response received from xDS Management Server: %v", resp) + p.sendConfigs(resp.Resources) + 
} + } +} + +func (p *XdsGrpcSotwProvider) getGrpcConnection() (*grpc.ClientConn, error) { + backOff := grpc_retry.BackoffLinearWithJitter(p.settings.ConfigGrpcXdsServerConnectRetryInterval, 0.5) + logger.Infof("Dialing xDS Management Server: '%s'", p.settings.ConfigGrpcXdsServerUrl) + return grpc.Dial( + p.settings.ConfigGrpcXdsServerUrl, + p.getGrpcTransportCredentials(), + grpc.WithBlock(), + grpc.WithStreamInterceptor( + grpc_retry.StreamClientInterceptor(grpc_retry.WithBackoff(backOff)), + )) +} + +func (p *XdsGrpcSotwProvider) getGrpcTransportCredentials() grpc.DialOption { + if !p.settings.ConfigGrpcXdsServerUseTls { + return grpc.WithTransportCredentials(insecure.NewCredentials()) + } + + configGrpcXdsTlsConfig := p.settings.ConfigGrpcXdsTlsConfig + if p.settings.ConfigGrpcXdsServerTlsSAN != "" { + logger.Infof("ServerName used for xDS Management Service hostname verification is %s", p.settings.ConfigGrpcXdsServerTlsSAN) + configGrpcXdsTlsConfig.ServerName = p.settings.ConfigGrpcXdsServerTlsSAN + } + return grpc.WithTransportCredentials(credentials.NewTLS(configGrpcXdsTlsConfig)) +} + +func (p *XdsGrpcSotwProvider) sendConfigs(resources []*any.Any) { + defer func() { + if e := recover(); e != nil { + p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e} + p.adsClient.Nack(fmt.Sprint(e)) + } + }() + + conf := make([]config.RateLimitConfigToLoad, 0, len(resources)) + for _, res := range resources { + confPb := &rls_conf_v3.RateLimitConfig{} + err := anypb.UnmarshalTo(res, confPb, proto.UnmarshalOptions{}) + if err != nil { + logger.Errorf("Error while unmarshalling config from xDS Management Server: %s", err.Error()) + p.adsClient.Nack(err.Error()) + return + } + + configYaml := config.ConfigXdsProtoToYaml(confPb) + conf = append(conf, config.RateLimitConfigToLoad{Name: confPb.Name, ConfigYaml: configYaml}) + } + rlSettings := settings.NewSettings() + rlsConf := p.loader.Load(conf, p.statsManager, rlSettings.MergeDomainConfigurations) + 
p.configUpdateEventChan <- &ConfigUpdateEventImpl{config: rlsConf} + p.adsClient.Ack() +} + +func (p *XdsGrpcSotwProvider) retryGrpcConn() { + p.connectionRetryChannel <- true +} + +func getClientNode(s settings.Settings) *corev3.Node { + // setting metadata for node + metadataMap := make(map[string]*structpb.Value) + for _, entry := range strings.Split(s.ConfigGrpcXdsNodeMetadata, ",") { + keyValPair := strings.SplitN(entry, "=", 2) + if len(keyValPair) == 2 { + metadataMap[keyValPair[0]] = structpb.NewStringValue(keyValPair[1]) + } + } + + return &corev3.Node{ + Id: s.ConfigGrpcXdsNodeId, + Metadata: &structpb.Struct{Fields: metadataMap}, + } +} diff --git a/src/server/server.go b/src/server/server.go index 46c8ea5d..aa812f8c 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -5,7 +5,8 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/goruntime/loader" + "github.com/envoyproxy/ratelimit/src/provider" + stats "github.com/lyft/gostats" "google.golang.org/grpc" ) @@ -35,9 +36,9 @@ type Server interface { GrpcServer() *grpc.Server /** - * Returns the runtime configuration for the server. + * Returns the configuration provider for the server. */ - Runtime() loader.IFace + Provider() provider.RateLimitConfigProvider /** * Stops serving the grpc port (for integration testing). 
diff --git a/src/server/server_impl.go b/src/server/server_impl.go index d98fc5ad..ba704449 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -11,7 +11,6 @@ import ( "net/http/pprof" "os" "os/signal" - "path/filepath" "sort" "strconv" "sync" @@ -20,6 +19,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" @@ -59,6 +59,7 @@ type server struct { grpcServer *grpc.Server store gostats.Store scope gostats.Scope + provider provider.RateLimitConfigProvider runtime loader.IFace debugListener serverDebugListener httpServer *http.Server @@ -127,6 +128,18 @@ func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *ht } } +func getProviderImpl(s settings.Settings, statsManager stats.Manager, rootStore gostats.Store) provider.RateLimitConfigProvider { + switch s.ConfigType { + case "FILE": + return provider.NewFileProvider(s, statsManager, rootStore) + case "GRPC_XDS_SOTW": + return provider.NewXdsGrpcSotwProvider(s, statsManager) + default: + logger.Fatalf("Invalid setting for ConfigType: %s", s.ConfigType) + panic("This line should not be reachable") + } +} + func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { server.router.HandleFunc("/json", NewJsonHandler(svc)) } @@ -184,8 +197,8 @@ func (server *server) Scope() gostats.Scope { return server.scope } -func (server *server) Runtime() loader.IFace { - return server.runtime +func (server *server) Provider() provider.RateLimitConfigProvider { + return server.provider } func NewServer(s settings.Settings, name string, statsManager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { @@ -234,37 +247,8 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } - // 
setup runtime - loaderOpts := make([]loader.Option, 0, 1) - if s.RuntimeIgnoreDotFiles { - loaderOpts = append(loaderOpts, loader.IgnoreDotFiles) - } else { - loaderOpts = append(loaderOpts, loader.AllowDotFiles) - } - var err error - if s.RuntimeWatchRoot { - ret.runtime, err = loader.New2( - s.RuntimePath, - s.RuntimeSubdirectory, - ret.store.ScopeWithTags("runtime", s.ExtraTags), - &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, - loaderOpts...) - } else { - directoryRefresher := &loader.DirectoryRefresher{} - // Adding loader.Remove to the default set of goruntime's FileSystemOps. - directoryRefresher.WatchFileSystemOps(loader.Remove, loader.Write, loader.Create, loader.Chmod) - - ret.runtime, err = loader.New2( - filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), - "config", - ret.store.ScopeWithTags("runtime", s.ExtraTags), - directoryRefresher, - loaderOpts...) - } - - if err != nil { - panic(err) - } + // setup config provider + ret.provider = getProviderImpl(s, statsManager, ret.store) // setup http router ret.router = mux.NewRouter() @@ -339,6 +323,7 @@ func (server *server) Stop() { if server.httpServer != nil { server.httpServer.Close() } + server.provider.Stop() } func (server *server) handleGracefulShutdown() { diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 902c8084..0299d3d3 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -18,13 +18,13 @@ import ( core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/goruntime/loader" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/redis" ) @@ -36,14 +36,11 @@ type RateLimitServiceServer interface { } type service struct { - 
runtime loader.IFace configLock sync.RWMutex - configLoader config.RateLimitConfigLoader + configUpdateEvent <-chan provider.ConfigUpdateEvent config config.RateLimitConfig - runtimeUpdateEvent chan int cache limiter.RateLimitCache stats stats.ServiceStats - runtimeWatchRoot bool customHeadersEnabled bool customHeaderLimitHeader string customHeaderRemainingHeader string @@ -52,35 +49,25 @@ type service struct { globalShadowMode bool } -func (this *service) reloadConfig(statsManager stats.Manager) { - defer func() { - if e := recover(); e != nil { - configError, ok := e.(config.RateLimitConfigError) - if !ok { - panic(e) - } - - this.stats.ConfigLoadError.Inc() - logger.Errorf("error loading new configuration from runtime: %s", configError.Error()) - } - }() - - files := []config.RateLimitConfigToLoad{} - snapshot := this.runtime.Snapshot() - for _, key := range snapshot.Keys() { - if this.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { - continue +func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent) { + newConfig, err := updateEvent.GetConfig() + if err != nil { + configError, ok := err.(config.RateLimitConfigError) + if !ok { + panic(err) } - files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) + this.stats.ConfigLoadError.Inc() + logger.Errorf("Error loading new configuration: %s", configError.Error()) + return } - rlSettings := settings.NewSettings() - newConfig := this.configLoader.Load(files, statsManager, rlSettings.MergeDomainConfigurations) this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig + + rlSettings := settings.NewSettings() this.globalShadowMode = rlSettings.GlobalShadowMode if rlSettings.RateLimitResponseHeadersEnabled { @@ -93,6 +80,7 @@ func (this *service) reloadConfig(statsManager stats.Manager) { this.customHeaderResetHeader = rlSettings.HeaderRatelimitReset } this.configLock.Unlock() + logger.Info("Successfully loaded new configuration") } type serviceError string 
@@ -312,32 +300,31 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { return this.config, this.globalShadowMode } -func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource, shadowMode bool) RateLimitServiceServer { +func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager, + clock utils.TimeSource, shadowMode, forceStart bool) RateLimitServiceServer { newService := &service{ - runtime: runtime, - configLock: sync.RWMutex{}, - configLoader: configLoader, - config: nil, - runtimeUpdateEvent: make(chan int), - cache: cache, - stats: statsManager.NewServiceStats(), - runtimeWatchRoot: runtimeWatchRoot, - globalShadowMode: shadowMode, - customHeaderClock: clock, + configLock: sync.RWMutex{}, + configUpdateEvent: configProvider.ConfigUpdateEvent(), + config: nil, + cache: cache, + stats: statsManager.NewServiceStats(), + globalShadowMode: shadowMode, + customHeaderClock: clock, } - runtime.AddUpdateCallback(newService.runtimeUpdateEvent) + if !forceStart { + logger.Info("Waiting for initial ratelimit config update event") + newService.setConfig(<-newService.configUpdateEvent) + logger.Info("Successfully loaded the initial ratelimit configs") + } - newService.reloadConfig(statsManager) go func() { - // No exit right now. 
for { - logger.Debugf("waiting for runtime update") - <-newService.runtimeUpdateEvent - logger.Debugf("got runtime update and reloading config") - newService.reloadConfig(statsManager) + logger.Debug("Waiting for config update event") + updateEvent := <-newService.configUpdateEvent + logger.Debug("Setting config retrieved from config provider") + newService.setConfig(updateEvent) } }() diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 605aed0e..d4593126 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -21,7 +21,6 @@ import ( logger "github.com/sirupsen/logrus" - "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/redis" @@ -117,13 +116,12 @@ func (runner *Runner) Run() { runner.mu.Unlock() service := ratelimit.NewService( - srv.Runtime(), createLimiter(srv, s, localCache, runner.statsManager), - config.NewRateLimitConfigLoaderImpl(), + srv.Provider(), runner.statsManager, - s.RuntimeWatchRoot, utils.NewTimeSourceImpl(), s.GlobalShadowMode, + s.ForceStartWithoutInitialConfig, ) srv.AddDebugHttpEndpoint( diff --git a/src/settings/settings.go b/src/settings/settings.go index d0731511..eae203dc 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -46,6 +46,28 @@ type Settings struct { LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` + // Rate limit configuration + // ConfigType is the method of configuring rate limits. Possible values "FILE", "GRPC_XDS_SOTW". 
+	ConfigType string `envconfig:"CONFIG_TYPE" default:"FILE"`
+	// ForceStartWithoutInitialConfig enables starting the server without an initial rate limit config event
+	ForceStartWithoutInitialConfig bool `envconfig:"FORCE_START_WITHOUT_INITIAL_CONFIG" default:"false"`
+
+	// xDS rate limit configuration
+	// ConfigGrpcXdsNodeId is the Node ID. xDS server should set snapshots to this Node ID
+	ConfigGrpcXdsNodeId                     string        `envconfig:"CONFIG_GRPC_XDS_NODE_ID" default:"default"`
+	ConfigGrpcXdsNodeMetadata               string        `envconfig:"CONFIG_GRPC_XDS_NODE_METADATA" default:""` // eg: "key1=val1,key2=val2"
+	ConfigGrpcXdsServerUrl                  string        `envconfig:"CONFIG_GRPC_XDS_SERVER_URL" default:"localhost:18000"`
+	ConfigGrpcXdsServerConnectRetryInterval time.Duration `envconfig:"CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL" default:"3s"`
+
+	// xDS config server TLS configurations
+	ConfigGrpcXdsTlsConfig       *tls.Config
+	ConfigGrpcXdsServerUseTls    bool   `envconfig:"CONFIG_GRPC_XDS_SERVER_USE_TLS" default:"false"`
+	ConfigGrpcXdsClientTlsCert   string `envconfig:"CONFIG_GRPC_XDS_CLIENT_TLS_CERT" default:""`
+	ConfigGrpcXdsClientTlsKey    string `envconfig:"CONFIG_GRPC_XDS_CLIENT_TLS_KEY" default:""`
+	ConfigGrpcXdsServerTlsCACert string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_CACERT" default:""`
+	// ConfigGrpcXdsServerTlsSAN is the SAN (ServerName) used for xDS Management Server hostname verification during TLS
+	ConfigGrpcXdsServerTlsSAN string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_SAN" default:""`
+
 	// Stats-related settings
 	UseStatsd  bool   `envconfig:"USE_STATSD" default:"true"`
 	StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"`
@@ -149,6 +171,7 @@ func NewSettings() Settings {
 	// When we require TLS to connect to Redis, we check if we need to connect using the provided key-pair.
RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) GrpcServerTlsConfig()(&s) + ConfigGrpcXdsServerTlsConfig()(&s) return s } @@ -178,6 +201,20 @@ func GrpcServerTlsConfig() Option { } } +func ConfigGrpcXdsServerTlsConfig() Option { + return func(s *Settings) { + if s.ConfigGrpcXdsServerUseTls { + configGrpcXdsServerTlsConfig := utils.TlsConfigFromFiles(s.ConfigGrpcXdsClientTlsCert, s.ConfigGrpcXdsClientTlsKey, s.ConfigGrpcXdsServerTlsCACert, utils.ServerCA) + if s.ConfigGrpcXdsServerTlsCACert != "" { + configGrpcXdsServerTlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } else { + configGrpcXdsServerTlsConfig.ClientAuth = tls.NoClientCert + } + s.ConfigGrpcXdsTlsConfig = configGrpcXdsServerTlsConfig + } + } +} + func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option { return func(s *Settings) { s.GrpcUnaryInterceptor = i diff --git a/test/common/xds_sotw.go b/test/common/xds_sotw.go new file mode 100644 index 00000000..1b214531 --- /dev/null +++ b/test/common/xds_sotw.go @@ -0,0 +1,70 @@ +package common + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "google.golang.org/grpc" + + discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +type XdsServerConfig struct { + Port int + NodeId string +} + +type SetSnapshotFunc func(*cache.Snapshot) + +func StartXdsSotwServer(t *testing.T, config *XdsServerConfig, initSnapshot *cache.Snapshot) (SetSnapshotFunc, context.CancelFunc) { + t.Helper() + + ctx, cancel := context.WithCancel(context.Background()) + + snapCache := cache.NewSnapshotCache(true, cache.IDHash{}, nil) + if err := initSnapshot.Consistent(); err != nil { + t.Errorf("Error checking consistency in initial snapshot: %v", err) + } + + if err := snapCache.SetSnapshot(context.Background(), config.NodeId, initSnapshot); err != nil { + panic(err) + } + srv := server.NewServer(ctx, snapCache, 
nil) + + grpcServer := grpc.NewServer() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", config.Port)) + if err != nil { + t.Errorf("Error listening to port: %v: %v", config.Port, err) + } + discoverygrpc.RegisterAggregatedDiscoveryServiceServer(grpcServer, srv) + go func() { + if err = grpcServer.Serve(lis); err != nil { + t.Error(err) + } + }() + + // HACK: Wait for the server to come up. Make a hook that we can wait on. + WaitForTcpPort(context.Background(), config.Port, 1*time.Second) + + cancelFunc := func() { + cancel() + grpcServer.Stop() + } + return setSnapshotFunc(t, snapCache, config.NodeId), cancelFunc +} + +func setSnapshotFunc(t *testing.T, snapCache cache.SnapshotCache, nodeId string) SetSnapshotFunc { + return func(snapshot *cache.Snapshot) { + if err := snapshot.Consistent(); err != nil { + t.Errorf("snapshot inconsistency: %+v\n%+v", snapshot, err) + } + if err := snapCache.SetSnapshot(context.Background(), nodeId, snapshot); err != nil { + t.Errorf("snapshot error %q for %+v", err, snapshot) + } + } +} diff --git a/test/config/config_test.go b/test/config/config_test.go index 2b4cb0a7..ad587e03 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -21,7 +21,8 @@ func loadFile(path string) []config.RateLimitConfigToLoad { if err != nil { panic(err) } - return []config.RateLimitConfigToLoad{{path, string(contents)}} + configYaml := config.ConfigFileContentToYaml(path, string(contents)) + return []config.RateLimitConfigToLoad{{Name: path, ConfigYaml: configYaml}} } func TestBasicConfig(t *testing.T) { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a835c508..74cfb8b1 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -95,6 +95,20 @@ func TestBasicConfig(t *testing.T) { }) } +func TestXdsProviderBasicConfig(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + {Port: 6380}, + }, func() { + _, cancel := 
startXdsSotwServer(t) + defer cancel() + t.Run("WithoutPerSecondRedis", testXdsProviderBasicConfig(false, 0)) + t.Run("WithPerSecondRedis", testXdsProviderBasicConfig(true, 0)) + t.Run("WithoutPerSecondRedisWithLocalCache", testXdsProviderBasicConfig(false, 1000)) + t.Run("WithPerSecondRedisWithLocalCache", testXdsProviderBasicConfig(true, 1000)) + }) +} + func TestBasicConfig_ExtraTags(t *testing.T) { common.WithMultiRedis(t, []common.RedisConfig{ {Port: 6383}, @@ -181,6 +195,17 @@ func TestBasicReloadConfig(t *testing.T) { }) } +func TestXdsProviderBasicConfigReload(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + }, func() { + setSnapshotFunc, cancel := startXdsSotwServer(t) + defer cancel() + + t.Run("ReloadConfigWithXdsServer", testXdsProviderBasicConfigReload(setSnapshotFunc, false, 0)) + }) +} + func makeSimpleMemcacheSettings(memcachePorts []int, localCacheSize int) settings.Settings { s := defaultSettings() var memcacheHostAndPort []string @@ -381,7 +406,7 @@ func testBasicConfigWithoutWatchRootWithRedisSentinel(perSecond bool, local_cach func testBasicConfigReload(perSecond bool, local_cache_size int, runtimeWatchRoot bool) func(*testing.T) { s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) s.RuntimeWatchRoot = runtimeWatchRoot - return testConfigReload(s) + return testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func testBasicConfigReloadWithRedisCluster(perSecond bool, local_cache_size int, runtimeWatchRoot string) func(*testing.T) { @@ -395,7 +420,7 @@ func testBasicConfigReloadWithRedisCluster(perSecond bool, local_cache_size int, configRedisCluster(&s) - return testConfigReload(s) + return testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func testBasicConfigReloadWithRedisSentinel(perSecond bool, local_cache_size int, runtimeWatchRoot bool) func(*testing.T) { @@ -409,7 +434,7 @@ func testBasicConfigReloadWithRedisSentinel(perSecond bool, local_cache_size int 
s.RuntimeWatchRoot = runtimeWatchRoot - return testConfigReload(s) + return testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func getCacheKey(cacheKey string, enableLocalCache bool) string { @@ -671,7 +696,7 @@ func startTestRunner(t *testing.T, s settings.Settings) *runner.Runner { return &runner } -func testConfigReload(s settings.Settings) func(*testing.T) { +func testConfigReload(s settings.Settings, reloadConfFunc, restoreConfFunc func()) func(*testing.T) { return func(t *testing.T) { enable_local_cache := s.LocalCacheSizeInBytes > 0 runner := startTestRunner(t, s) @@ -698,26 +723,7 @@ func testConfigReload(s settings.Settings) func(*testing.T) { runner.GetStatsStore().Flush() loadCountBefore := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() - // Copy a new file to config folder to test config reload functionality - in, err := os.Open("runtime/current/ratelimit/reload.yaml") - if err != nil { - panic(err) - } - defer in.Close() - out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") - if err != nil { - panic(err) - } - defer out.Close() - _, err = io.Copy(out, in) - if err != nil { - panic(err) - } - err = out.Close() - if err != nil { - panic(err) - } - + reloadConfFunc() loadCountAfter, reloaded := waitForConfigReload(runner, loadCountBefore) assert.True(reloaded) @@ -739,11 +745,7 @@ func testConfigReload(s settings.Settings) func(*testing.T) { response) assert.NoError(err) - err = os.Remove("runtime/current/ratelimit/config/reload.yaml") - if err != nil { - panic(err) - } - + restoreConfFunc() // Removal of config files must trigger a reload loadCountBefore = loadCountAfter loadCountAfter, reloaded = waitForConfigReload(runner, loadCountBefore) @@ -752,6 +754,35 @@ func testConfigReload(s settings.Settings) func(*testing.T) { } } +func reloadNewConfigFile() { + // Copy a new file to config folder to test config reload functionality + in, err := os.Open("runtime/current/ratelimit/reload.yaml") 
+ if err != nil { + panic(err) + } + defer in.Close() + out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + defer out.Close() + _, err = io.Copy(out, in) + if err != nil { + panic(err) + } + err = out.Close() + if err != nil { + panic(err) + } +} + +func restoreConfigFile() { + err := os.Remove("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } +} + func waitForConfigReload(runner *runner.Runner, loadCountBefore uint64) (uint64, bool) { // Need to wait for config reload to take place and new descriptors to be loaded. // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. diff --git a/test/integration/xds_sotw_integration_test.go b/test/integration/xds_sotw_integration_test.go new file mode 100644 index 00000000..01f43583 --- /dev/null +++ b/test/integration/xds_sotw_integration_test.go @@ -0,0 +1,182 @@ +//go:build integration + +package integration_test + +import ( + "context" + "testing" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/test/common" + + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +func testXdsProviderBasicConfig(perSecond bool, local_cache_size int) func(*testing.T) { + s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) + configXdsProvider(&s) + + return testBasicBaseConfig(s) +} + +func testXdsProviderBasicConfigReload(setSnapshotFunc common.SetSnapshotFunc, perSecond bool, local_cache_size int) func(*testing.T) { + s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) + configXdsProvider(&s) + return testConfigReload(s, newConfigWithXdsConfigProvider(setSnapshotFunc), restoreConfigWithXdsConfigProvider(setSnapshotFunc)) +} + +func configXdsProvider(s 
*settings.Settings) { + s.ConfigType = "GRPC_XDS_SOTW" + s.ConfigGrpcXdsNodeId = "init-test-node" + s.ConfigGrpcXdsServerUrl = "localhost:18000" +} + +func startXdsSotwServer(t *testing.T) (common.SetSnapshotFunc, context.CancelFunc) { + conf := &common.XdsServerConfig{Port: 18000, NodeId: "init-test-node"} + intSnapshot, err := cache.NewSnapshot("1", initialXdsBasicConfig()) + if err != nil { + panic(err) + } + return common.StartXdsSotwServer(t, conf, intSnapshot) +} + +func initialXdsBasicConfig() map[resource.Type][]types.Resource { + return map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "basic", + Domain: "basic", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "key1_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "one_per_minute", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "another", + Domain: "another", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 20, + }, + }, + { + Key: "key3", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 10, + }, + }, + { + Key: "key2_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 20, + }, + }, + { + Key: "key3_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 10, + }, + }, + { + Key: "key4", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 20, + }, + }, + { + Key: "key4_local", + 
RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 20, + }, + }, + }, + }, + }, + } +} + +func newConfigWithXdsConfigProvider(setSnapshotFunc common.SetSnapshotFunc) func() { + initConfig := initialXdsBasicConfig() + rlsConf := initConfig[resource.RateLimitConfigType] + newRlsConf := append(rlsConf, &rls_config.RateLimitConfig{ + Name: "reload", + Domain: "reload", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "block", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 0, + }, + }, + { + Key: "one_per_minute", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + }, + }) + + newConfig := map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: newRlsConf, + } + newSnapshot, err := cache.NewSnapshot("2", newConfig) + if err != nil { + panic(err) + } + + return func() { + setSnapshotFunc(newSnapshot) + } +} + +func restoreConfigWithXdsConfigProvider(setSnapshotFunc common.SetSnapshotFunc) func() { + newSnapshot, err := cache.NewSnapshot("3", initialXdsBasicConfig()) + if err != nil { + panic(err) + } + + return func() { + setSnapshotFunc(newSnapshot) + } +} diff --git a/test/mocks/provider/provider.go b/test/mocks/provider/provider.go new file mode 100644 index 00000000..376f8379 --- /dev/null +++ b/test/mocks/provider/provider.go @@ -0,0 +1,101 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: /Users/renuka/git/ratelimit/src/provider/provider.go + +// Package mock_provider is a generated GoMock package. 
+package mock_provider + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + config "github.com/envoyproxy/ratelimit/src/config" + provider "github.com/envoyproxy/ratelimit/src/provider" +) + +// MockRateLimitConfigProvider is a mock of RateLimitConfigProvider interface +type MockRateLimitConfigProvider struct { + ctrl *gomock.Controller + recorder *MockRateLimitConfigProviderMockRecorder +} + +// MockRateLimitConfigProviderMockRecorder is the mock recorder for MockRateLimitConfigProvider +type MockRateLimitConfigProviderMockRecorder struct { + mock *MockRateLimitConfigProvider +} + +// NewMockRateLimitConfigProvider creates a new mock instance +func NewMockRateLimitConfigProvider(ctrl *gomock.Controller) *MockRateLimitConfigProvider { + mock := &MockRateLimitConfigProvider{ctrl: ctrl} + mock.recorder = &MockRateLimitConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitConfigProvider) EXPECT() *MockRateLimitConfigProviderMockRecorder { + return m.recorder +} + +// ConfigUpdateEvent mocks base method +func (m *MockRateLimitConfigProvider) ConfigUpdateEvent() <-chan provider.ConfigUpdateEvent { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConfigUpdateEvent") + ret0, _ := ret[0].(<-chan provider.ConfigUpdateEvent) + return ret0 +} + +// ConfigUpdateEvent indicates an expected call of ConfigUpdateEvent +func (mr *MockRateLimitConfigProviderMockRecorder) ConfigUpdateEvent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigUpdateEvent", reflect.TypeOf((*MockRateLimitConfigProvider)(nil).ConfigUpdateEvent)) +} + +// Stop mocks base method +func (m *MockRateLimitConfigProvider) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop +func (mr *MockRateLimitConfigProviderMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockRateLimitConfigProvider)(nil).Stop)) +} + +// MockConfigUpdateEvent is a mock of ConfigUpdateEvent interface +type MockConfigUpdateEvent struct { + ctrl *gomock.Controller + recorder *MockConfigUpdateEventMockRecorder +} + +// MockConfigUpdateEventMockRecorder is the mock recorder for MockConfigUpdateEvent +type MockConfigUpdateEventMockRecorder struct { + mock *MockConfigUpdateEvent +} + +// NewMockConfigUpdateEvent creates a new mock instance +func NewMockConfigUpdateEvent(ctrl *gomock.Controller) *MockConfigUpdateEvent { + mock := &MockConfigUpdateEvent{ctrl: ctrl} + mock.recorder = &MockConfigUpdateEventMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockConfigUpdateEvent) EXPECT() *MockConfigUpdateEventMockRecorder { + return m.recorder +} + +// GetConfig mocks base method +func (m *MockConfigUpdateEvent) GetConfig() (config.RateLimitConfig, any) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfig") + ret0, _ := ret[0].(config.RateLimitConfig) + ret1, _ := ret[1].(any) + return ret0, ret1 +} + +// GetConfig indicates an expected call of GetConfig +func (mr *MockConfigUpdateEventMockRecorder) GetConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfig", reflect.TypeOf((*MockConfigUpdateEvent)(nil).GetConfig)) +} diff --git a/test/provider/xds_grpc_sotw_provider_test.go b/test/provider/xds_grpc_sotw_provider_test.go new file mode 100644 index 00000000..49a124bd --- /dev/null +++ b/test/provider/xds_grpc_sotw_provider_test.go @@ -0,0 +1,330 @@ +package provider_test + +import ( + "fmt" + "os" + "strings" + "testing" + + gostats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + 
"github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + "github.com/envoyproxy/ratelimit/src/provider" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/test/common" + "github.com/envoyproxy/ratelimit/test/mocks/stats" + + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +const ( + xdsNodeId = "test-node" + xdsPort = 18001 +) + +func TestXdsProvider(t *testing.T) { + intSnapshot, _ := cache.NewSnapshot("1", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc, cancel := common.StartXdsSotwServer(t, &common.XdsServerConfig{Port: xdsPort, NodeId: xdsNodeId}, intSnapshot) + defer cancel() + + s := settings.Settings{ + ConfigType: "GRPC_XDS_SOTW", + ConfigGrpcXdsNodeId: xdsNodeId, + ConfigGrpcXdsServerUrl: fmt.Sprintf("localhost:%d", xdsPort), + } + + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + statsManager := stats.NewMockStatManager(statsStore) + p := provider.NewXdsGrpcSotwProvider(s, statsManager) + defer p.Stop() + providerEventChan := p.ConfigUpdateEvent() + + snapVersion := 1 + t.Run("Test initial xDS config", testInitialXdsConfig(&snapVersion, setSnapshotFunc, providerEventChan)) + t.Run("Test new (after initial) xDS config update", testNewXdsConfigUpdate(&snapVersion, setSnapshotFunc, providerEventChan)) + t.Run("Test multi domain xDS config update", testMultiDomainXdsConfigUpdate(&snapVersion, setSnapshotFunc, providerEventChan)) + t.Run("Test limits with deeper xDS config update", testDeeperLimitsXdsConfigUpdate(&snapVersion, setSnapshotFunc, providerEventChan)) + + err := os.Setenv("MERGE_DOMAIN_CONFIG", "true") + defer 
os.Unsetenv("MERGE_DOMAIN_CONFIG") + if err != nil { + t.Error("Error setting 'MERGE_DOMAIN_CONFIG' environment variable", err) + } + t.Run("Test same domain multiple times xDS config update", testSameDomainMultipleXdsConfigUpdate(setSnapshotFunc, providerEventChan)) +} + +func testInitialXdsConfig(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.Equal("foo.k1_v1: unit=MINUTE requests_per_unit=3, shadow_mode: false\n", config.Dump()) + } +} + +func testNewXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 5, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.Equal("foo.k2_v2: unit=MINUTE requests_per_unit=5, shadow_mode: false\n", config.Dump()) + } +} + +func testMultiDomainXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + 
map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 10, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "bar", + Domain: "bar", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} + +func testDeeperLimitsXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 10, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k2", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + { + Key: "k2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 15, + }, + }, + }, + }, + { + Key: "j1", + Value: "v2", + 
RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "j2", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + { + Key: "j2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 15, + }, + ShadowMode: true, + }, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "bar", + Domain: "bar", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + "foo.k1_v1.k2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.k1_v1.k2_v2: unit=HOUR requests_per_unit=15, shadow_mode: false", + "foo.j1_v2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.j1_v2.j2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.j1_v2.j2_v2: unit=DAY requests_per_unit=15, shadow_mode: true", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} + +func testSameDomainMultipleXdsConfigUpdate(setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot("3", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo-1", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + 
RequestsPerUnit: 10, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "foo-2", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v2: unit=MINUTE requests_per_unit=100, shadow_mode: false", + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index faddbbdb..d90a8ce2 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" @@ -25,8 +26,7 @@ import ( "github.com/envoyproxy/ratelimit/test/common" mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" - mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" - mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + mock_provider "github.com/envoyproxy/ratelimit/test/mocks/provider" mock_stats "github.com/envoyproxy/ratelimit/test/mocks/stats" ) @@ -60,12 +60,11 @@ func newBarrier() barrier { type rateLimitServiceTestSuite struct { assert *assert.Assertions controller *gomock.Controller - runtime *mock_loader.MockIFace - snapshot *mock_snapshot.MockIFace cache *mock_limiter.MockRateLimitCache - configLoader *mock_config.MockRateLimitConfigLoader + configProvider *mock_provider.MockRateLimitConfigProvider + configUpdateEventChan chan 
provider.ConfigUpdateEvent + configUpdateEvent *mock_provider.MockConfigUpdateEvent config *mock_config.MockRateLimitConfig - runtimeUpdateCallback chan<- int statsManager stats.Manager statStore gostats.Store mockClock utils.TimeSource @@ -81,10 +80,11 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret := rateLimitServiceTestSuite{} ret.assert = assert.New(t) ret.controller = gomock.NewController(t) - ret.runtime = mock_loader.NewMockIFace(ret.controller) - ret.snapshot = mock_snapshot.NewMockIFace(ret.controller) ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) - ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) + ret.configProvider = mock_provider.NewMockRateLimitConfigProvider(ret.controller) + ret.configUpdateEventChan = make(chan provider.ConfigUpdateEvent) + ret.configUpdateEvent = mock_provider.NewMockConfigUpdateEvent(ret.controller) + // ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = gostats.NewStore(gostats.NewNullSink(), false) ret.statsManager = mock_stats.NewMockStatManager(ret.statStore) @@ -92,21 +92,19 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { } func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitServiceServer { - this.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( - func(callback chan<- int) { - this.runtimeUpdateCallback = callback - }) - this.runtime.EXPECT().Snapshot().Return(this.snapshot).MinTimes(1) - this.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) - this.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) - this.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, - gomock.Any(), gomock.Any()).Return(this.config) - - // reset exporter before using + barrier := newBarrier() + 
this.configProvider.EXPECT().ConfigUpdateEvent().Return(this.configUpdateEventChan).Times(1) + this.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return this.config, nil + }) + go func() { this.configUpdateEventChan <- this.configUpdateEvent }() // initial config update from provider + testSpanExporter.Reset() - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}, false) + svc := ratelimit.NewService(this.cache, this.configProvider, this.statsManager, MockClock{now: int64(2222)}, false, false) + barrier.wait() // wait for initial config load + return svc } // once a ratelimit service is initiated, the package always fetches a default tracer from otel runtime and it can't be change until a new round of test is run. It is necessary to keep a package level exporter in this test package in order to correctly run the tests. @@ -116,6 +114,7 @@ func TestService(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() service := t.setupBasicService() + barrier := newBarrier() // First request, config should be loaded. request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) @@ -133,12 +132,12 @@ func TestService(test *testing.T) { response) t.assert.Nil(err) - // Force a config reload. - barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + // Force a config reload - config event from config provider. + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Different request. 
@@ -169,13 +168,11 @@ func TestService(test *testing.T) { t.assert.Nil(err) // Config load failure. - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { - defer barrier.signal() - panic(config.RateLimitConfigError("load error")) - }) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return nil, config.RateLimitConfigError("load error") + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Config should still be valid. Also make sure order does not affect results. @@ -222,10 +219,11 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Force a config reload. barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make a request. @@ -356,10 +354,11 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { // Config reload. 
barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make request @@ -408,10 +407,11 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { // Config reload. barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make request @@ -495,17 +495,15 @@ func TestInitialLoadError(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() - t.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( - func(callback chan<- int) { t.runtimeUpdateCallback = callback }) - t.runtime.EXPECT().Snapshot().Return(t.snapshot).MinTimes(1) - t.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) - t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { - panic(config.RateLimitConfigError("load error")) - }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, 
t.mockClock, false) + t.configProvider.EXPECT().ConfigUpdateEvent().Return(t.configUpdateEventChan).Times(1) + barrier := newBarrier() + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return nil, config.RateLimitConfigError("load error") + }) + go func() { t.configUpdateEventChan <- t.configUpdateEvent }() // initial config update from provider + service := ratelimit.NewService(t.cache, t.configProvider, t.statsManager, t.mockClock, false, false) + barrier.wait() request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(context.Background(), request)