From e2eb25823832d5affe602a6308c7de7ae60370bd Mon Sep 17 00:00:00 2001
From: James Forcier <jforcier@grubhub.com>
Date: Wed, 11 Sep 2019 08:52:00 -0400
Subject: [PATCH] upstream: Add ability to disable host selection during panic
 (#8024)

Previously, when in a panic state, requests would be routed to all hosts. In
some cases it is instead preferable to not route any requests. Add a
configuration option for zone-aware load balancers which switches the panic
behavior from routing to all hosts to routing to no hosts.

Closes #7550.

Signed-off-by: James Forcier <jforcier@grubhub.com>

Risk Level: Low
Testing: 2 new unit tests written; manual testing
Docs Changes: Note about new configuration option added
Release Notes: added

Signed-off-by: James Forcier <jforcier@grubhub.com>
---
 api/envoy/api/v2/cds.proto                    |   6 +
 .../load_balancing/panic_threshold.rst        |  19 ++-
 docs/root/intro/version_history.rst           |   1 +
 source/common/upstream/load_balancer_impl.cc  |  46 +++++--
 source/common/upstream/load_balancer_impl.h   |   4 +-
 .../upstream/load_balancer_impl_test.cc       | 122 +++++++++++++++++-
 6 files changed, 175 insertions(+), 23 deletions(-)

diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto
index a0095ac2aa2c..b2b71384f8c8 100644
--- a/api/envoy/api/v2/cds.proto
+++ b/api/envoy/api/v2/cds.proto
@@ -539,6 +539,12 @@ message Cluster {
     //   * :ref:`runtime values `.
     //   * :ref:`Zone aware routing support `.
     google.protobuf.UInt64Value min_cluster_size = 2;
+
+    // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic
+    // mode`. Instead, the cluster will fail all
+    // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a
+    // failing service.
+    bool fail_traffic_on_panic = 3;
   }
   // Configuration for :ref:`locality weighted load balancing
   // `
diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst
index e24022a8f07b..864ad11171e6 100644
--- a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst
+++ b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst
@@ -5,13 +5,24 @@ Panic threshold
 During load balancing, Envoy will generally only consider available (healthy or degraded) hosts in
 an upstream cluster. However, if the percentage of available hosts in the cluster becomes too low,
-Envoy will disregard health status and balance amongst all hosts. This is known as the *panic
-threshold*. The default panic threshold is 50%. This is
+Envoy will disregard health status and balance either amongst all hosts or no hosts. This is known
+as the *panic threshold*. The default panic threshold is 50%. This is
 :ref:`configurable ` via runtime as well as in the
 :ref:`cluster configuration `. The panic threshold is used to avoid a situation in which host failures
 cascade throughout the cluster as load increases.
 
+There are two modes Envoy can choose from when in a panic state: traffic will either be sent to all
+hosts, or will be sent to no hosts (and therefore will always fail). This is configured in the
+:ref:`cluster configuration `.
+Choosing to fail traffic during panic scenarios can help avoid overwhelming potentially failing
+upstream services, as it will reduce the load on the upstream service before all hosts have been
+determined to be unhealthy. However, it eliminates the possibility of *some* requests succeeding
+even when many or all hosts in a cluster are unhealthy.
+This may be a good tradeoff to make if a given service is observed to fail in an all-or-nothing
+pattern, as it will more quickly cut off requests to the cluster. Conversely, if a cluster
+typically continues to successfully service *some* requests even when degraded, enabling this
+option is probably unhelpful.
+
 Panic thresholds work in conjunction with priorities. If the number of available hosts in a given
 priority goes down, Envoy will try to shift some traffic to lower priorities. If it succeeds in
 finding enough available hosts in lower priorities, Envoy will disregard panic thresholds. In
@@ -20,8 +31,8 @@ disregards panic thresholds and continues to distribute traffic load across prio
 the algorithm described :ref:`here `. However, when normalized total availability
 drops below 100%, Envoy assumes that there are not enough available hosts across all priority
 levels. It continues to distribute traffic load across priorities,
-but if a given priority level's availability is below the panic threshold, traffic will go to all hosts
-in that priority level regardless of their availability.
+but if a given priority level's availability is below the panic threshold, traffic will go to all
+(or no) hosts in that priority level regardless of their availability.
 
 The following examples explain the relationship between normalized total availability and panic
 threshold. It is assumed that the default value of 50% is used for the panic threshold.
diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst
index 679ea4f9e769..42e8c463c0d6 100644
--- a/docs/root/intro/version_history.rst
+++ b/docs/root/intro/version_history.rst
@@ -59,6 +59,7 @@ Version history
 * upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change.
 * upstream: added network filter chains to upstream connections, see :ref:`filters`.
 * upstream: use p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1.
+* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state.
 * zookeeper: parse responses and emit latency stats.
 
 1.11.1 (August 13, 2019)
diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc
index 79bf87b79c44..b3172d9c9a27 100644
--- a/source/common/upstream/load_balancer_impl.cc
+++ b/source/common/upstream/load_balancer_impl.cc
@@ -282,7 +282,8 @@ ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(
       routing_enabled_(PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(
           common_config.zone_aware_lb_config(), routing_enabled, 100, 100)),
       min_cluster_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(common_config.zone_aware_lb_config(),
-                                                        min_cluster_size, 6U)) {
+                                                        min_cluster_size, 6U)),
+      fail_traffic_on_panic_(common_config.zone_aware_lb_config().fail_traffic_on_panic()) {
   ASSERT(!priority_set.hostSetsPerPriority().empty());
   resizePerPriorityState();
   priority_set_.addPriorityUpdateCb(
@@ -539,7 +540,7 @@ uint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& h
   return i;
 }
 
-ZoneAwareLoadBalancerBase::HostsSource
+absl::optional<ZoneAwareLoadBalancerBase::HostsSource>
 ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) {
   auto host_set_and_source = chooseHostSet(context);
 
@@ -549,11 +550,16 @@ ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) {
   HostsSource hosts_source;
   hosts_source.priority_ = host_set.priority();
 
-  // If the selected host set has insufficient healthy hosts, return all hosts.
+  // If the selected host set has insufficient healthy hosts, return all hosts (unless we should
+  // fail traffic on panic, in which case return no host).
   if (per_priority_panic_[hosts_source.priority_]) {
     stats_.lb_healthy_panic_.inc();
-    hosts_source.source_type_ = HostsSource::SourceType::AllHosts;
-    return hosts_source;
+    if (fail_traffic_on_panic_) {
+      return absl::nullopt;
+    } else {
+      hosts_source.source_type_ = HostsSource::SourceType::AllHosts;
+      return hosts_source;
+    }
   }
 
   // If we're doing locality weighted balancing, pick locality.
@@ -586,10 +592,14 @@ ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) {
 
   if (isGlobalPanic(localHostSet())) {
     stats_.lb_local_cluster_not_ok_.inc();
-    // If the local Envoy instances are in global panic, do not do locality
-    // based routing.
-    hosts_source.source_type_ = sourceType(host_availability);
-    return hosts_source;
+    // If the local Envoy instances are in global panic, and we should not fail traffic, do
+    // not do locality based routing.
+    if (fail_traffic_on_panic_) {
+      return absl::nullopt;
+    } else {
+      hosts_source.source_type_ = sourceType(host_availability);
+      return hosts_source;
+    }
   }
 
   hosts_source.source_type_ = localitySourceType(host_availability);
@@ -699,8 +709,11 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) {
 }
 
 HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* context) {
-  const HostsSource hosts_source = hostSourceToUse(context);
-  auto scheduler_it = scheduler_.find(hosts_source);
+  const absl::optional<HostsSource> hosts_source = hostSourceToUse(context);
+  if (!hosts_source) {
+    return nullptr;
+  }
+  auto scheduler_it = scheduler_.find(*hosts_source);
   // We should always have a scheduler for any return value from
   // hostSourceToUse() via the construction in refresh();
   ASSERT(scheduler_it != scheduler_.end());
@@ -717,11 +730,11 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont
     }
     return host;
   } else {
-    const HostVector& hosts_to_use = hostSourceToHosts(hosts_source);
+    const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source);
     if (hosts_to_use.empty()) {
       return nullptr;
     }
-    return unweightedHostPick(hosts_to_use, hosts_source);
+    return unweightedHostPick(hosts_to_use, *hosts_source);
   }
 }
 
@@ -749,7 +762,12 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector
 }
 
 HostConstSharedPtr RandomLoadBalancer::chooseHostOnce(LoadBalancerContext* context) {
-  const HostVector& hosts_to_use = hostSourceToHosts(hostSourceToUse(context));
+  const absl::optional<HostsSource> hosts_source = hostSourceToUse(context);
+  if (!hosts_source) {
+    return nullptr;
+  }
+
+  const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source);
   if (hosts_to_use.empty()) {
     return nullptr;
   }
diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h
index 98fd0c75d7d1..988955f359ab 100644
--- a/source/common/upstream/load_balancer_impl.h
+++ b/source/common/upstream/load_balancer_impl.h
@@ -223,8 +223,9 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase {
 
   /**
    * Pick the host source to use, doing zone aware routing when the hosts are sufficiently healthy.
+   * If no host is chosen (due to fail_traffic_on_panic being set), return absl::nullopt.
    */
-  HostsSource hostSourceToUse(LoadBalancerContext* context);
+  absl::optional<HostsSource> hostSourceToUse(LoadBalancerContext* context);
 
   /**
    * Index into priority_set via hosts source descriptor.
@@ -300,6 +301,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase {
 
   const uint32_t routing_enabled_;
   const uint64_t min_cluster_size_;
+  const bool fail_traffic_on_panic_;
 
   struct PerPriorityState {
     // The percent of requests which can be routed to the local locality.
diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc
index e29dd9b0dcd8..a2e7366f5ece 100644
--- a/test/common/upstream/load_balancer_impl_test.cc
+++ b/test/common/upstream/load_balancer_impl_test.cc
@@ -539,6 +539,39 @@ TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSet) {
   EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));
 }
 
+// Test that extending the priority set with an existing LB causes the correct updates when the
+// cluster is configured to disable on panic.
+TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSetDisableOnPanic) {
+  host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")};
+  failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")};
+  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);
+
+  init(false);
+  // With both the primary and failover hosts unhealthy, we should select no host.
+  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));
+
+  // Update the priority set with a new priority level P=2 and ensure the host
+  // is chosen
+  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);
+  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")}));
+  tertiary_host_set_.hosts_ = *hosts;
+  tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_;
+  HostVector add_hosts;
+  add_hosts.push_back(tertiary_host_set_.hosts_[0]);
+  tertiary_host_set_.runCallbacks(add_hosts, {});
+  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));
+
+  // Now add a healthy host in P=0 and make sure it is immediately selected.
+  host_set_.healthy_hosts_ = host_set_.hosts_;
+  host_set_.runCallbacks(add_hosts, {});
+  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));
+
+  // Remove the healthy host and ensure we fail back over to tertiary_host_set_
+  host_set_.healthy_hosts_ = {};
+  host_set_.runCallbacks({}, {});
+  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));
+}
+
 // Test extending the priority set.
 TEST_P(FailoverTest, ExtendPrioritiesUpdatingPrioritySet) {
   host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")};
@@ -829,6 +862,32 @@ TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanic) {
   EXPECT_EQ(3UL, stats_.lb_healthy_panic_.value());
 }
 
+// Test that no hosts are selected when fail_traffic_on_panic is enabled.
+TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanicDisableOnPanic) {
+  hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"),
+                              makeTestHost(info_, "tcp://127.0.0.1:81")};
+  hostSet().hosts_ = {
+      makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"),
+      makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"),
+      makeTestHost(info_, "tcp://127.0.0.1:84"), makeTestHost(info_, "tcp://127.0.0.1:85")};
+
+  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);
+
+  init(false);
+  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));
+
+  // Take the threshold back above the panic threshold.
+  hostSet().healthy_hosts_ = {
+      makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"),
+      makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83")};
+  hostSet().runCallbacks({}, {});
+
+  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));
+  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));
+
+  EXPECT_EQ(1UL, stats_.lb_healthy_panic_.value());
+}
+
 // Ensure if the panic threshold is 0%, panic mode is disabled.
 TEST_P(RoundRobinLoadBalancerTest, DisablePanicMode) {
   hostSet().healthy_hosts_ = {};
@@ -1177,6 +1236,41 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) {
   EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value());
 }
 
+TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmptyFailTrafficOnPanic) {
+  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);
+
+  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.
+    return;
+  }
+  HostVectorSharedPtr upstream_hosts(new HostVector(
+      {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")}));
+  HostVectorSharedPtr local_hosts(new HostVector({}, {}));
+
+  HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality(
+      {{makeTestHost(info_, "tcp://127.0.0.1:80")}, {makeTestHost(info_, "tcp://127.0.0.1:81")}});
+  HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality({{}, {}});
+
+  EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 50))
+      .WillOnce(Return(50))
+      .WillOnce(Return(50));
+  EXPECT_CALL(runtime_.snapshot_, featureEnabled("upstream.zone_routing.enabled", 100))
+      .WillOnce(Return(true));
+  EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.zone_routing.min_cluster_size", 6))
+      .WillOnce(Return(1));
+
+  hostSet().healthy_hosts_ = *upstream_hosts;
+  hostSet().hosts_ = *upstream_hosts;
+  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;
+  init(true);
+  updateHosts(local_hosts, local_hosts_per_locality);
+
+  // Local cluster is not OK, we'll do regular routing (and select no host, since we're in global
+  // panic).
+  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));
+  EXPECT_EQ(0U, stats_.lb_healthy_panic_.value());
+  EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value());
+}
+
 // Validate that if we have healthy host lists >= 2, but there is no local
 // locality included, that we skip zone aware routing and fallback.
 TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) {
@@ -1388,20 +1482,40 @@ INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LeastRequestLoadBalancerTest,
 
 class RandomLoadBalancerTest : public LoadBalancerTestBase {
 public:
-  RandomLoadBalancer lb_{priority_set_, nullptr, stats_, runtime_, random_, common_config_};
+  void init() {
+    lb_.reset(
+        new RandomLoadBalancer(priority_set_, nullptr, stats_, runtime_, random_, common_config_));
+  }
+  std::shared_ptr<RandomLoadBalancer> lb_;
 };
 
-TEST_P(RandomLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); }
+TEST_P(RandomLoadBalancerTest, NoHosts) {
+  init();
+  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));
+}
 
 TEST_P(RandomLoadBalancerTest, Normal) {
+  init();
+
   hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"),
                               makeTestHost(info_, "tcp://127.0.0.1:81")};
   hostSet().hosts_ = hostSet().healthy_hosts_;
   hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.
 
   EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2));
-  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));
+  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));
 
   EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(3));
-  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));
+  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));
+}
+
+TEST_P(RandomLoadBalancerTest, FailClusterOnPanic) {
+  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);
+  init();
+
+  hostSet().healthy_hosts_ = {};
+  hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"),
+                      makeTestHost(info_, "tcp://127.0.0.1:81")};
+  hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.
+
+  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));
+}
 
 INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, RandomLoadBalancerTest,
                          ::testing::Values(true, false));
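
For reviewers who want to exercise the new option, the field lives under the cluster's `common_lb_config.zone_aware_lb_config` (see the cds.proto hunk above). A minimal cluster sketch follows; the cluster name, endpoint address, and timeout are illustrative placeholders, not part of this patch:

```yaml
# Sketch of a v2 cluster opting into fail-on-panic behavior. Only
# zone_aware_lb_config.fail_traffic_on_panic comes from this patch; the
# rest is an illustrative stand-in.
clusters:
- name: example_service
  connect_timeout: 0.25s
  type: STRICT_DNS
  lb_policy: ROUND_ROBIN
  common_lb_config:
    # The panic threshold itself; 50% is already the default, shown for context.
    healthy_panic_threshold:
      value: 50.0
    zone_aware_lb_config:
      # New field from this patch: when the cluster enters panic mode, fail
      # all requests instead of balancing across all (including unhealthy) hosts.
      fail_traffic_on_panic: true
  load_assignment:
    cluster_name: example_service
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: example-service.internal
              port_value: 8080
```

With this set, a panicking cluster selects no host at all, so requests fail as if every host were unhealthy (for HTTP traffic, Envoy's usual no-healthy-upstream 503 handling) rather than being spread across hosts that are likely failing.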