From bf274e2ec36ee1542a23530549d427ca272b46e0 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Fri, 28 Apr 2023 10:17:09 +0000 Subject: [PATCH 01/11] Add new idea for selecting hosts among those not selected yet. Signed-off-by: Leonardo da Mata --- source/common/upstream/load_balancer_impl.cc | 23 +++++++++++++++++-- .../upstream/load_balancer_impl_test.cc | 4 ++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 15bfa82596c43..c5efcdb343e16 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1299,9 +1299,26 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector const HostsSource&) { HostSharedPtr candidate_host = nullptr; + uint32_t hosts_to_use_current_size = hosts_to_use.size(); + HostVectorSharedPtr hosts( new HostVector(hosts_to_use)); + for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { - const int rand_idx = random_.random() % hosts_to_use.size(); - const HostSharedPtr& sampled_host = hosts_to_use[rand_idx]; + const int rand_idx = random_.random() % hosts_to_use_current_size; + const HostSharedPtr& sampled_host = hosts->at(rand_idx); + std::cout <<"pick: " << sampled_host->address()->asString() << "\n"; + + // Swap selected host with latest one and skip latest on next iteration + // so we don't repeat the selection when there are enough hosts. 
+ uint32_t last_host_idx = hosts_to_use_current_size-1; + if ( hosts_to_use.size() > choice_count_ ) { + --hosts_to_use_current_size; + + hosts->at(rand_idx) = hosts->at(last_host_idx); + std::cout << "Modified\n"; + for (auto& it : *hosts) { + std::cout << it->address()->asString() << "\n"; + } + } if (candidate_host == nullptr) { @@ -1317,6 +1334,8 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector } } + + std::cout << "CHOSEN " << candidate_host->address()->asString() << "\n"; return candidate_host; } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 518f2a4de1d00..69aa94a3c6419 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2858,11 +2858,11 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { // 0 choices configured should default to P2C. EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); - EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_.chooseHost(nullptr)); // 2 choices configured results in P2C. EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); - EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); // 5 choices configured results in P5C. 
EXPECT_CALL(random_, random()).Times(6).WillRepeatedly(Return(0)); From e5237d55e5429f09b2cff9e96bc4ee7f2e952067 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Fri, 28 Apr 2023 15:05:49 +0000 Subject: [PATCH 02/11] Change how we choose full table scan Signed-off-by: Leonardo da Mata --- api/envoy/config/cluster/v3/cluster.proto | 5 ++++ .../least_request/v3/least_request.proto | 5 ++++ source/common/upstream/load_balancer_impl.cc | 25 ++++++++++++++++-- source/common/upstream/load_balancer_impl.h | 14 ++++++++-- .../upstream/load_balancer_impl_test.cc | 26 +++++++++++-------- 5 files changed, 60 insertions(+), 15 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 91535b9ee1aa4..e716cbe5da979 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -476,6 +476,11 @@ message Cluster { // Configuration for slow start mode. // If this configuration is not set, slow start will not be not enabled. SlowStartConfig slow_start_config = 3; + + // Configuration for performing full scan on the list of hosts. + // If this configuration is set, when selecting the host a full scan on the list hosts will be + // used to select the one with least requests instead of using random choices. + google.protobuf.BoolValue full_scan_hosts = 4; } // Specific configuration for the :ref:`RingHash` diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 87a379c669124..b9032e53b8d84 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -58,4 +58,9 @@ message LeastRequest { // Configuration for local zone aware load balancing or locality weighted load balancing. 
common.v3.LocalityLbConfig locality_lb_config = 4; + + // Configuration for performing full scan on the list of hosts. + // If this configuration is set, when selecting the host a full scan on the list hosts will be + // used to select the one with least requests instead of using random choices. + google.protobuf.BoolValue full_scan_hosts = 5; } diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index c5efcdb343e16..1dc2c406a6c78 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1299,16 +1299,37 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector const HostsSource&) { HostSharedPtr candidate_host = nullptr; + // We do a full scan if the number of choices is equal to the size. + if ((hosts_to_use.size() <= choice_count_ )|| full_scan_hosts_) { + for (auto& sampled_host : hosts_to_use) { + if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. + candidate_host = sampled_host; + continue; + } + + const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); + const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); + if (sampled_active_rq < candidate_active_rq) { + candidate_host = sampled_host; + } + } + return candidate_host; + } + uint32_t hosts_to_use_current_size = hosts_to_use.size(); HostVectorSharedPtr hosts( new HostVector(hosts_to_use)); - + for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use_current_size; const HostSharedPtr& sampled_host = hosts->at(rand_idx); std::cout <<"pick: " << sampled_host->address()->asString() << "\n"; - // Swap selected host with latest one and skip latest on next iteration + // Swap selected host with latest one and skip latest one on next iteration // so we don't repeat the selection when there are enough hosts. 
+ // This will prevent use to choose the same host on our selection since there + // is a higher chance of selecting the same host when the number of hosts is + // too big. uint32_t last_host_idx = hosts_to_use_current_size-1; if ( hosts_to_use.size() > choice_count_ ) { --hosts_to_use_current_size; diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 8338da065da42..6de7dce4be842 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -690,11 +690,16 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.ref(), choice_count, 2) : 2), + active_request_bias_runtime_( least_request_config.has_value() && least_request_config->has_active_request_bias() ? absl::optional( {least_request_config->active_request_bias(), runtime}) - : absl::nullopt) { + : absl::nullopt), + full_scan_hosts_( + least_request_config.has_value() + ? least_request_config->full_scan_hosts().value() + : false) { initialize(); } @@ -709,11 +714,15 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { LoadBalancerConfigHelper::localityLbConfigFromProto(least_request_config), LoadBalancerConfigHelper::slowStartConfigFromProto(least_request_config), time_source), choice_count_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config, choice_count, 2)), + active_request_bias_runtime_( least_request_config.has_active_request_bias() ? absl::optional( {least_request_config.active_request_bias(), runtime}) - : absl::nullopt) { + : absl::nullopt), + full_scan_hosts_( + least_request_config.has_full_scan_hosts() ? 
least_request_config.full_scan_hosts().value() + : false) { initialize(); } @@ -749,6 +758,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { double active_request_bias_{}; const absl::optional active_request_bias_runtime_; + const bool full_scan_hosts_; }; /** diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 69aa94a3c6419..3e50e427c63df 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2850,8 +2850,11 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { lr_lb_config.mutable_choice_count()->set_value(2); LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, random_, common_config_, lr_lb_config, simTime()}; - lr_lb_config.mutable_choice_count()->set_value(5); - LeastRequestLoadBalancer lb_5{priority_set_, nullptr, stats_, runtime_, + lr_lb_config.mutable_choice_count()->set_value(3); + LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(4); + LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, random_, common_config_, lr_lb_config, simTime()}; // Verify correct number of choices. @@ -2864,20 +2867,21 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); - // 5 choices configured results in P5C. - EXPECT_CALL(random_, random()).Times(6).WillRepeatedly(Return(0)); - EXPECT_EQ(hostSet().healthy_hosts_[0], lb_5.chooseHost(nullptr)); - // Verify correct host chosen in P5C scenario. + // Verify correct host chosen in P3C scenario. 
EXPECT_CALL(random_, random()) - .Times(6) - .WillOnce(Return(0)) - .WillOnce(Return(3)) + .Times(3) .WillOnce(Return(0)) .WillOnce(Return(3)) - .WillOnce(Return(2)) .WillOnce(Return(1)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_5.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); + + + // When the number of hosts is smaller or equal to the number of choices we don't call + // random() since we do a full table scan. + EXPECT_CALL(random_, random()).Times(0); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_4.chooseHost(nullptr)); + } TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { From d9f6c1aadeabd924121a371b49184760b280d81f Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Tue, 2 May 2023 13:23:17 +0000 Subject: [PATCH 03/11] Remove cout Signed-off-by: Leonardo da Mata --- source/common/upstream/load_balancer_impl.cc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 1dc2c406a6c78..ece3823b342fa 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1323,7 +1323,6 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use_current_size; const HostSharedPtr& sampled_host = hosts->at(rand_idx); - std::cout <<"pick: " << sampled_host->address()->asString() << "\n"; // Swap selected host with latest one and skip latest one on next iteration // so we don't repeat the selection when there are enough hosts. 
@@ -1335,10 +1334,6 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector --hosts_to_use_current_size; hosts->at(rand_idx) = hosts->at(last_host_idx); - std::cout << "Modified\n"; - for (auto& it : *hosts) { - std::cout << it->address()->asString() << "\n"; - } } if (candidate_host == nullptr) { @@ -1355,8 +1350,6 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector } } - - std::cout << "CHOSEN " << candidate_host->address()->asString() << "\n"; return candidate_host; } From 771118159b75235b9df8fd123c4f03491c347cb5 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Fri, 29 Sep 2023 19:38:10 +0000 Subject: [PATCH 04/11] Fix Tests for load_balancer_impl_test Signed-off-by: Leonardo da Mata --- .../least_request/v3/least_request.proto | 1 + source/common/upstream/load_balancer_impl.cc | 26 +++--- source/common/upstream/load_balancer_impl.h | 15 ++-- .../upstream/load_balancer_impl_test.cc | 87 ++++++++++++++++--- 4 files changed, 94 insertions(+), 35 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index b9032e53b8d84..66a3453d92f77 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -22,6 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy // extension point. See the :ref:`load balancing architecture overview // ` for more information. +// [#next-free-field: 6] message LeastRequest { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. 
diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index ece3823b342fa..5613e872bdf89 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1300,39 +1300,39 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector HostSharedPtr candidate_host = nullptr; // We do a full scan if the number of choices is equal to the size. - if ((hosts_to_use.size() <= choice_count_ )|| full_scan_hosts_) { + if ((hosts_to_use.size() <= choice_count_) || full_scan_hosts_) { for (auto& sampled_host : hosts_to_use) { - if (candidate_host == nullptr) { - // Make a first choice to start the comparisons. - candidate_host = sampled_host; - continue; + if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. + candidate_host = sampled_host; + continue; } const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); if (sampled_active_rq < candidate_active_rq) { - candidate_host = sampled_host; - } + candidate_host = sampled_host; + } } return candidate_host; } - + uint32_t hosts_to_use_current_size = hosts_to_use.size(); - HostVectorSharedPtr hosts( new HostVector(hosts_to_use)); + HostVectorSharedPtr hosts(new HostVector(hosts_to_use)); for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use_current_size; const HostSharedPtr& sampled_host = hosts->at(rand_idx); - // Swap selected host with latest one and skip latest one on next iteration + // Swap selected host with latest one and skip latest one on next iteration // so we don't repeat the selection when there are enough hosts. // This will prevent use to choose the same host on our selection since there // is a higher chance of selecting the same host when the number of hosts is // too big. 
- uint32_t last_host_idx = hosts_to_use_current_size-1; - if ( hosts_to_use.size() > choice_count_ ) { + uint32_t last_host_idx = hosts_to_use_current_size - 1; + if (hosts_to_use.size() > choice_count_) { --hosts_to_use_current_size; - + hosts->at(rand_idx) = hosts->at(last_host_idx); } diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 6de7dce4be842..e8e0f430d4c07 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -690,16 +690,15 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.ref(), choice_count, 2) : 2), - + active_request_bias_runtime_( least_request_config.has_value() && least_request_config->has_active_request_bias() ? absl::optional( {least_request_config->active_request_bias(), runtime}) : absl::nullopt), - full_scan_hosts_( - least_request_config.has_value() - ? least_request_config->full_scan_hosts().value() - : false) { + full_scan_hosts_(least_request_config.has_value() + ? least_request_config->full_scan_hosts().value() + : false) { initialize(); } @@ -720,9 +719,9 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { ? absl::optional( {least_request_config.active_request_bias(), runtime}) : absl::nullopt), - full_scan_hosts_( - least_request_config.has_full_scan_hosts() ? least_request_config.full_scan_hosts().value() - : false) { + full_scan_hosts_(least_request_config.has_full_scan_hosts() + ? least_request_config.full_scan_hosts().value() + : false) { initialize(); } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 3e50e427c63df..defd86de1027c 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2787,20 +2787,20 @@ TEST_P(LeastRequestLoadBalancerTest, SingleHost) { // Host weight is 1. 
{ - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } // Host weight is 100. { - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } HostVector empty; { hostSet().runCallbacks(empty, empty); - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } @@ -2810,7 +2810,7 @@ TEST_P(LeastRequestLoadBalancerTest, SingleHost) { hostSet().healthy_hosts_.clear(); hostSet().hosts_.clear(); hostSet().runCallbacks(empty, remove_hosts); - EXPECT_CALL(random_, random()).WillOnce(Return(0)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } } @@ -2823,12 +2823,12 @@ TEST_P(LeastRequestLoadBalancerTest, Normal) { hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); hostSet().healthy_hosts_[1]->stats().rq_active_.set(2); - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); hostSet().healthy_hosts_[0]->stats().rq_active_.set(2); hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr)); } @@ -2836,7 +2836,8 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), makeTestHost(info_, "tcp://127.0.0.1:81", 
simTime()), makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), - makeTestHost(info_, "tcp://127.0.0.1:83", simTime())}; + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())}; hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. @@ -2844,6 +2845,7 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { hostSet().healthy_hosts_[1]->stats().rq_active_.set(3); hostSet().healthy_hosts_[2]->stats().rq_active_.set(2); hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[4]->stats().rq_active_.set(5); // Creating various load balancer objects with different choice configs. envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; @@ -2856,7 +2858,9 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { lr_lb_config.mutable_choice_count()->set_value(4); LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, random_, common_config_, lr_lb_config, simTime()}; - + lr_lb_config.mutable_choice_count()->set_value(6); + LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; // Verify correct number of choices. // 0 choices configured should default to P2C. @@ -2867,21 +2871,76 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); - // Verify correct host chosen in P3C scenario. EXPECT_CALL(random_, random()) - .Times(3) + .Times(4) .WillOnce(Return(0)) .WillOnce(Return(3)) - .WillOnce(Return(1)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); + .WillOnce(Return(1)) + .WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_3.chooseHost(nullptr)); + // Verify correct host chosen in P4C scenario. 
+ EXPECT_CALL(random_, random()) + .Times(5) + .WillOnce(Return(0)) + .WillOnce(Return(3)) + .WillOnce(Return(1)) + .WillOnce(Return(1)) + .WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_4.chooseHost(nullptr)); // When the number of hosts is smaller or equal to the number of choices we don't call // random() since we do a full table scan. - EXPECT_CALL(random_, random()).Times(0); - EXPECT_EQ(hostSet().healthy_hosts_[0], lb_4.chooseHost(nullptr)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_6.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, FullScan) { + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(4); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[2]->stats().rq_active_.set(2); + hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[4]->stats().rq_active_.set(5); + + // Creating various load balancer objects with different choice configs. 
+ envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_choice_count()->set_value(2); + // Enable full table scan on hosts + lr_lb_config.mutable_full_scan_hosts()->set_value(true); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(3); + LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(4); + LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(6); + LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + + // random is called only once everytime and is not to select the host. + + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); + + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); + + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_4.chooseHost(nullptr)); + EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_6.chooseHost(nullptr)); } TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { From 032d925e26496925d9312b656ddad4f829bfce87 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Fri, 13 Oct 2023 23:01:22 +0000 Subject: [PATCH 05/11] Fix format and make sure full scan happens only when selected or the number of choices is larger than the size. 
Signed-off-by: Leonardo da Mata --- api/envoy/config/cluster/v3/cluster.proto | 2 +- .../least_request/v3/least_request.proto | 2 +- source/common/upstream/load_balancer_impl.cc | 26 +++++-------------- source/common/upstream/load_balancer_impl.h | 14 ++++------ .../upstream/load_balancer_impl_test.cc | 24 ++++++++--------- 5 files changed, 25 insertions(+), 43 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index e716cbe5da979..b49bbeeea17e0 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -480,7 +480,7 @@ message Cluster { // Configuration for performing full scan on the list of hosts. // If this configuration is set, when selecting the host a full scan on the list hosts will be // used to select the one with least requests instead of using random choices. - google.protobuf.BoolValue full_scan_hosts = 4; + google.protobuf.BoolValue enable_full_scan = 4; } // Specific configuration for the :ref:`RingHash` diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 66a3453d92f77..81c004ea9c7d2 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -63,5 +63,5 @@ message LeastRequest { // Configuration for performing full scan on the list of hosts. // If this configuration is set, when selecting the host a full scan on the list hosts will be // used to select the one with least requests instead of using random choices. 
- google.protobuf.BoolValue full_scan_hosts = 5; + google.protobuf.BoolValue enable_full_scan = 5; } diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 5613e872bdf89..98260a6be7e4f 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1299,9 +1299,10 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector const HostsSource&) { HostSharedPtr candidate_host = nullptr; - // We do a full scan if the number of choices is equal to the size. - if ((hosts_to_use.size() <= choice_count_) || full_scan_hosts_) { - for (auto& sampled_host : hosts_to_use) { + // Do full scan if it's required explicitly or the number of choices is equal to or larger than + // the hosts size. + if ((hosts_to_use.size() <= choice_count_) || enable_full_scan_) { + for (const auto& sampled_host : hosts_to_use) { if (candidate_host == nullptr) { // Make a first choice to start the comparisons. candidate_host = sampled_host; @@ -1317,24 +1318,9 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector return candidate_host; } - uint32_t hosts_to_use_current_size = hosts_to_use.size(); - HostVectorSharedPtr hosts(new HostVector(hosts_to_use)); - for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { - const int rand_idx = random_.random() % hosts_to_use_current_size; - const HostSharedPtr& sampled_host = hosts->at(rand_idx); - - // Swap selected host with latest one and skip latest one on next iteration - // so we don't repeat the selection when there are enough hosts. - // This will prevent use to choose the same host on our selection since there - // is a higher chance of selecting the same host when the number of hosts is - // too big. 
- uint32_t last_host_idx = hosts_to_use_current_size - 1; - if (hosts_to_use.size() > choice_count_) { - --hosts_to_use_current_size; - - hosts->at(rand_idx) = hosts->at(last_host_idx); - } + const int rand_idx = random_.random() % hosts_to_use.size(); + const HostSharedPtr& sampled_host = hosts_to_use[rand_idx]; if (candidate_host == nullptr) { diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index e8e0f430d4c07..d7d029ef45cec 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -690,15 +690,13 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.ref(), choice_count, 2) : 2), - active_request_bias_runtime_( least_request_config.has_value() && least_request_config->has_active_request_bias() ? absl::optional( {least_request_config->active_request_bias(), runtime}) : absl::nullopt), - full_scan_hosts_(least_request_config.has_value() - ? least_request_config->full_scan_hosts().value() - : false) { + enable_full_scan_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.ref(), enable_full_scan, false)) { initialize(); } @@ -713,15 +711,13 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { LoadBalancerConfigHelper::localityLbConfigFromProto(least_request_config), LoadBalancerConfigHelper::slowStartConfigFromProto(least_request_config), time_source), choice_count_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config, choice_count, 2)), - active_request_bias_runtime_( least_request_config.has_active_request_bias() ? absl::optional( {least_request_config.active_request_bias(), runtime}) : absl::nullopt), - full_scan_hosts_(least_request_config.has_full_scan_hosts() - ? 
least_request_config.full_scan_hosts().value() - : false) { + enable_full_scan_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config, enable_full_scan, false)) { initialize(); } @@ -757,7 +753,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { double active_request_bias_{}; const absl::optional active_request_bias_runtime_; - const bool full_scan_hosts_; + const bool enable_full_scan_; }; /** diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index defd86de1027c..abf424c057af7 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2787,20 +2787,20 @@ TEST_P(LeastRequestLoadBalancerTest, SingleHost) { // Host weight is 1. { - EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } // Host weight is 100. { - EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } HostVector empty; { hostSet().runCallbacks(empty, empty); - EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } @@ -2810,7 +2810,7 @@ TEST_P(LeastRequestLoadBalancerTest, SingleHost) { hostSet().healthy_hosts_.clear(); hostSet().hosts_.clear(); hostSet().runCallbacks(empty, remove_hosts); - EXPECT_CALL(random_, random()).WillOnce(Return(9999)); + EXPECT_CALL(random_, random()).WillOnce(Return(0)); EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } } @@ -2865,11 +2865,11 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { // 0 choices configured should default to P2C. 
EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); // 2 choices configured results in P2C. EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); // Verify correct host chosen in P3C scenario. EXPECT_CALL(random_, random()) @@ -2877,8 +2877,8 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { .WillOnce(Return(0)) .WillOnce(Return(3)) .WillOnce(Return(1)) - .WillOnce(Return(9999)); - EXPECT_EQ(hostSet().healthy_hosts_[2], lb_3.chooseHost(nullptr)); + .WillOnce(Return(2)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); // Verify correct host chosen in P4C scenario. EXPECT_CALL(random_, random()) @@ -2887,8 +2887,8 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { .WillOnce(Return(3)) .WillOnce(Return(1)) .WillOnce(Return(1)) - .WillOnce(Return(9999)); - EXPECT_EQ(hostSet().healthy_hosts_[2], lb_4.chooseHost(nullptr)); + .WillOnce(Return(2)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_4.chooseHost(nullptr)); // When the number of hosts is smaller or equal to the number of choices we don't call // random() since we do a full table scan. 
@@ -2915,7 +2915,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScan) {
   envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config;
   lr_lb_config.mutable_choice_count()->set_value(2);
   // Enable full table scan on hosts
-  lr_lb_config.mutable_full_scan_hosts()->set_value(true);
+  lr_lb_config.mutable_enable_full_scan()->set_value(true);
   LeastRequestLoadBalancer lb_2{priority_set_, nullptr,        stats_,       runtime_,
                                 random_,       common_config_, lr_lb_config, simTime()};
   lr_lb_config.mutable_choice_count()->set_value(3);
@@ -2928,7 +2928,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScan) {
   LeastRequestLoadBalancer lb_6{priority_set_, nullptr,        stats_,       runtime_,
                                 random_,       common_config_, lr_lb_config, simTime()};
 
-  // random is called only once everytime and is not to select the host.
+  // random is called only once every time and is not to select the host.
   EXPECT_CALL(random_, random()).WillOnce(Return(9999));
 
   EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr));
 
From 99863c0f6da479e9b506143a204e21addbbb9780 Mon Sep 17 00:00:00 2001
From: Leonardo da Mata
Date: Thu, 19 Oct 2023 15:38:02 +0000
Subject: [PATCH 06/11] Enable new option on extensions API only

Signed-off-by: Leonardo da Mata

---
 api/envoy/config/cluster/v3/cluster.proto     |  5 -----
 source/common/upstream/load_balancer_impl.h   |  3 +--
 .../upstream/load_balancer_impl_test.cc       | 20 ++++++++++---------
 3 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto
index b49bbeeea17e0..91535b9ee1aa4 100644
--- a/api/envoy/config/cluster/v3/cluster.proto
+++ b/api/envoy/config/cluster/v3/cluster.proto
@@ -476,11 +476,6 @@ message Cluster {
     // Configuration for slow start mode.
     // If this configuration is not set, slow start will not be not enabled.
     SlowStartConfig slow_start_config = 3;
-
-    // Configuration for performing full scan on the list of hosts. 
- // If this configuration is set, when selecting the host a full scan on the list hosts will be - // used to select the one with least requests instead of using random choices. - google.protobuf.BoolValue enable_full_scan = 4; } // Specific configuration for the :ref:`RingHash` diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index d7d029ef45cec..6940ba8a4a452 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -695,8 +695,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { ? absl::optional( {least_request_config->active_request_bias(), runtime}) : absl::nullopt), - enable_full_scan_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.ref(), enable_full_scan, false)) { + enable_full_scan_(false) { initialize(); } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index abf424c057af7..c9b572f9b3b30 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2912,21 +2912,23 @@ TEST_P(LeastRequestLoadBalancerTest, FullScan) { hostSet().healthy_hosts_[4]->stats().rq_active_.set(5); // Creating various load balancer objects with different choice configs. 
- envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(2); // Enable full table scan on hosts lr_lb_config.mutable_enable_full_scan()->set_value(true); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config, simTime()}; + common_config_.mutable_healthy_panic_threshold()->set_value(0); + + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; lr_lb_config.mutable_choice_count()->set_value(3); - LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config, simTime()}; + LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; lr_lb_config.mutable_choice_count()->set_value(4); - LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config, simTime()}; + LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; lr_lb_config.mutable_choice_count()->set_value(6); - LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config, simTime()}; + LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; // random is called only once every time and is not to select the host. From e58b34f32b375114c1feb12b3be48876ef82b36a Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Sat, 21 Oct 2023 17:13:51 +0000 Subject: [PATCH 07/11] Fix Integration tests. 
Signed-off-by: Leonardo da Mata

---
 source/common/upstream/load_balancer_impl.h         | 5 ++---
 test/integration/http_subset_lb_integration_test.cc | 5 ++++-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h
index 6940ba8a4a452..eafff556426ee 100644
--- a/source/common/upstream/load_balancer_impl.h
+++ b/source/common/upstream/load_balancer_impl.h
@@ -694,8 +694,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase {
           least_request_config.has_value() && least_request_config->has_active_request_bias()
               ? absl::optional(
                     {least_request_config->active_request_bias(), runtime})
-              : absl::nullopt),
-        enable_full_scan_(false) {
+              : absl::nullopt) {
     initialize();
   }
 
@@ -752,7 +751,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase {
 
   double active_request_bias_{};
   const absl::optional active_request_bias_runtime_;
-  const bool enable_full_scan_;
+  const bool enable_full_scan_{};
 };
 
 /**
diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc
index 11707c624851b..bf2969e35bd26 100644
--- a/test/integration/http_subset_lb_integration_test.cc
+++ b/test/integration/http_subset_lb_integration_test.cc
@@ -176,7 +176,10 @@ class HttpSubsetLbIntegrationTest
     }
   }
 
-  if (is_hash_lb_) {
+  // The default number of choices for the LEAST_REQUEST policy is 2; if the number of hosts
+  // is equal to the number of choices, a full scan happens instead. This means that the same host
+  // will be chosen. 
+  if (is_hash_lb_ || (GetParam() == envoy::config::cluster::v3::Cluster::LEAST_REQUEST)) {
     EXPECT_EQ(hosts.size(), 1) << "Expected a single unique host to be selected for "
                                << envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam());
   } else {

From f98ac942df2160daaf38df1929a9c6314b0953c8 Mon Sep 17 00:00:00 2001
From: Leonardo da Mata
Date: Mon, 23 Oct 2023 09:59:54 +0000
Subject: [PATCH 08/11] Add release notes for full scan in least request LB.

Signed-off-by: Leonardo da Mata

---
 changelogs/current.yaml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/changelogs/current.yaml b/changelogs/current.yaml
index 6e48e9794a32c..5a5e6a1ad2756 100644
--- a/changelogs/current.yaml
+++ b/changelogs/current.yaml
@@ -4,7 +4,12 @@ behavior_changes:
 # *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required*
 
 minor_behavior_changes:
-# *Changes that may cause incompatibilities for some users, but should not for most*
+- area: upstream
+  change: |
+    Fixed a reported issue (https://github.com/envoyproxy/envoy/issues/11004) that caused the Least
+    Request load balancer policy to be unfair when the number of hosts is very small. When the number
+    of hosts is smaller than the choice_count, instead of randomly selecting hosts from the list, we
+    perform a full scan on it to choose the host with the least requests.
 
 bug_fixes:
 # *Changes expected to improve the state of the world and are unlikely to have negative effects*
@@ -20,5 +25,9 @@ removed_config_or_runtime:
 # *Normally occurs at the end of the* :ref:`deprecation period `
 
 new_features:
+- area: upstream
+  change: |
+    Adds the option enable_full_scan on :ref:`load balancer policy `
+    so instead of using the choice_count to choose the hosts, we do a full scan on this list of hosts. 
deprecated: From 0aea580f4712898ba98be0d09dce373d091f53ae Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Mon, 23 Oct 2023 12:14:24 +0000 Subject: [PATCH 09/11] Fix ref for release note. Signed-off-by: Leonardo da Mata --- changelogs/current.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelogs/current.yaml b/changelogs/current.yaml index 5a5e6a1ad2756..5828f1e020f80 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -27,7 +27,7 @@ removed_config_or_runtime: new_features: - area: upstream change: | - Adds the option enable_full_scan on :ref:`load balancer policy ` + Adds the option enable_full_scan on :ref:`least_request ` so instead of using the choice_count to choose the hosts, we do a full scan on this list of hosts. deprecated: From 3c15f4e6a66c3587b9539ec5616b9173c03b4c6a Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Mon, 23 Oct 2023 12:39:20 +0000 Subject: [PATCH 10/11] Fix release notes Signed-off-by: Leonardo da Mata --- changelogs/current.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelogs/current.yaml b/changelogs/current.yaml index 5828f1e020f80..a619825ac25cf 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -27,7 +27,7 @@ removed_config_or_runtime: new_features: - area: upstream change: | - Adds the option enable_full_scan on :ref:`least_request ` + Adds the option enable_full_scan on envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest so instead of using the choice_count to choose the hosts, we do a full scan on this list of hosts. 
deprecated: From 1f9b298f02806a6ea749abc655c66e7cea30d1d4 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Tue, 24 Oct 2023 13:10:39 +0000 Subject: [PATCH 11/11] Update release note Signed-off-by: Leonardo da Mata --- changelogs/current.yaml | 7 +++++-- .../upstream/load_balancing/load_balancers.rst | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/changelogs/current.yaml b/changelogs/current.yaml index a619825ac25cf..880ec81c560fa 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -27,7 +27,10 @@ removed_config_or_runtime: new_features: - area: upstream change: | - Adds the option enable_full_scan on envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest - so instead of using the choice_count to choose the hosts, we do a full scan on this list of hosts. + Added :ref:`enable_full_scan ` + option to the least requested load balancer. If set to true, Envoy will perform a full scan on the list of hosts + instead of using :ref:`choice_count + ` + to select the hosts. deprecated: diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst index e99fe65b231ca..f6deaa4968a83 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst @@ -38,7 +38,9 @@ same or different weights. approach is nearly as good as an O(N) full scan). This is also known as P2C (power of two choices). The P2C load balancer has the property that a host with the highest number of active requests in the cluster will never receive new requests. It will be allowed to drain until it is - less than or equal to all of the other hosts. + less than or equal to all of the other hosts. The number of hosts chosen can be changed by setting + ``choice_count``. 
+ * *all weights not equal*: If two or more hosts in the cluster have different load balancing weights, the load balancer shifts into a mode where it uses a weighted round robin schedule in which weights are dynamically adjusted based on the host's request load at the time of selection.