Skip to content

Commit

Permalink
addressing comments
Browse files Browse the repository at this point in the history
Signed-off-by: Shulin Jia <shulin@squareup.com>
  • Loading branch information
shulin-sq committed Aug 23, 2024
1 parent e2659f1 commit a871846
Show file tree
Hide file tree
Showing 3 changed files with 65 additions and 119 deletions.
6 changes: 3 additions & 3 deletions changelogs/current.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,6 @@ date: Pending

behavior_changes:
# *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required*
- area: upstream
change: |
Use previously calculated healthcheck when endpoints move priority levels repeatedly.
- area: tracing
change: |
Removed support for (long deprecated) opentracing. See `issue 27401
Expand Down Expand Up @@ -72,6 +69,9 @@ minor_behavior_changes:
bug_fixes:
# *Changes expected to improve the state of the world and are unlikely to have negative effects*
- area: upstream
change: |
Use previously calculated healthcheck when endpoints move priority levels repeatedly.
- area: dns
change: |
The DNS filter no longer returns FORMERR if a message has an ID of 0.
Expand Down
12 changes: 5 additions & 7 deletions source/common/upstream/upstream_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -988,16 +988,14 @@ void MainPrioritySetImpl::updateCrossPriorityHostMap(uint32_t priority,
}

for (const auto& host : hosts_removed) {
auto existing_host = mutable_cross_priority_host_map_->find(addressToString(host->address()));

const auto host_address = addressToString(host->address());
const auto existing_host = mutable_cross_priority_host_map_->find(host_address);
if (existing_host != mutable_cross_priority_host_map_->end()) {
// an earlier priority added this host, which indicates that the host moved from
// An earlier priority added this host, which indicates that the host moved from
// a higher value priority to a lower value priority. We should not remove it from the
// cross priority map.
if (existing_host->second->priority() < priority) {
continue;
} else {
mutable_cross_priority_host_map_->erase(addressToString(host->address()));
if (existing_host->second->priority() >= priority) {
mutable_cross_priority_host_map_->erase(host_address);
}
}
}
Expand Down
166 changes: 57 additions & 109 deletions test/extensions/clusters/eds/eds_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1403,132 +1403,80 @@ TEST_F(EdsTest, EndpointMovedToNewPriority) {
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
}
}

// 1 -> 2
// Moves all the endpoints to priority 2.
cluster_load_assignment.clear_endpoints();
add_endpoint(80, 2);
add_endpoint(81, 2);

doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);

{
// Priority 0 and 1 should now be empty.
auto& hosts1 = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();
EXPECT_EQ(hosts1.size(), 0);
auto& hosts2 = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();
EXPECT_EQ(hosts2.size(), 0);
}

{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[2]->hosts();
EXPECT_EQ(hosts.size(), 2);

// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
}

// 2 -> 3
cluster_load_assignment.clear_endpoints();
add_endpoint(80, 3);
add_endpoint(81, 3);

doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);

{
// Priority 0, 1, and 2 should now be empty.
auto& hosts1 = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();
EXPECT_EQ(hosts1.size(), 0);
auto& hosts2 = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();
EXPECT_EQ(hosts2.size(), 0);
auto& hosts3 = cluster_->prioritySet().hostSetsPerPriority()[2]->hosts();
EXPECT_EQ(hosts3.size(), 0);
}

{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[3]->hosts();
EXPECT_EQ(hosts.size(), 2);

// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
}

// 3 -> 2
cluster_load_assignment.clear_endpoints();
add_endpoint(80, 2);
add_endpoint(81, 2);

doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);
// Verifies that if an endpoint is moved to a new priority multiple times, the health
// check value is preserved.
TEST_F(EdsTest, EndpointMovedToNewPriorityRepeated) {
envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;
cluster_load_assignment.set_cluster_name("fare");
resetCluster();

{
// Priority 3 should now be empty.
auto& hosts1 = cluster_->prioritySet().hostSetsPerPriority()[3]->hosts();
EXPECT_EQ(hosts1.size(), 0);
}
auto health_checker = std::make_shared<MockHealthChecker>();
EXPECT_CALL(*health_checker, start());
EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);
cluster_->setHealthChecker(health_checker);

{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[2]->hosts();
EXPECT_EQ(hosts.size(), 2);
auto add_endpoint = [&cluster_load_assignment](int port, int priority) {
auto* endpoints = cluster_load_assignment.add_endpoints();
endpoints->set_priority(priority);

// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
}
auto* socket_address = endpoints->add_lb_endpoints()
->mutable_endpoint()
->mutable_address()
->mutable_socket_address();
socket_address->set_address("1.2.3.4");
socket_address->set_port_value(port);
};

// 2 -> 1
cluster_load_assignment.clear_endpoints();
add_endpoint(80, 1);
add_endpoint(81, 1);
add_endpoint(80, 0);
add_endpoint(81, 0);

doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);

{
// Priority 2 should now be empty.
auto& hosts1 = cluster_->prioritySet().hostSetsPerPriority()[2]->hosts();
EXPECT_EQ(hosts1.size(), 0);
}

{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();
EXPECT_EQ(hosts.size(), 2);

// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
// Mark the hosts as healthy
for (auto& host : hosts) {
EXPECT_TRUE(host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);
host->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);
}
}

// 1 -> 0
cluster_load_assignment.clear_endpoints();
add_endpoint(80, 0);
add_endpoint(81, 0);
std::vector<uint32_t> priority_levels = {1, 2, 3, 2, 1};
for (uint32_t priority : priority_levels) {
cluster_load_assignment.clear_endpoints();
add_endpoint(80, priority);
add_endpoint(81, priority);

doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);
doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);

{
// Priority 1 should now be empty.
auto& hosts1 = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();
EXPECT_EQ(hosts1.size(), 0);
}
{
for (uint32_t i = 0; i < cluster_->prioritySet().hostSetsPerPriority().size(); i++) {
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[i]->hosts();
if (i == priority) {
// The priority equal to this one should have the endpoints with ports 80 and 81.
EXPECT_EQ(hosts.size(), 2);
} else {
// Priorities not equal to this one should now be empty.
EXPECT_EQ(hosts.size(), 0);
}
}
}

{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();
EXPECT_EQ(hosts.size(), 2);
// Confirm that as the priority level changes, the successful healthcheck status is preserved.
{
auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[priority]->hosts();

// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
// The endpoints were healthy, so moving them around should preserve that.
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));
}
}
}

Expand Down

0 comments on commit a871846

Please sign in to comment.