From d4db44e2df25a1c26bdcf314ef732cf2722d6690 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Mon, 13 May 2024 22:57:22 +0000 Subject: [PATCH 1/9] [muxorch] Using bulker to program routes/neighbors during switchover Uses entity bulker to program routes and neighbors during mux switchover. Mux switchover performance suffers when switching over with a large number of neighbors on the mux port. This uses the optimization of programming the neighbors and routes in bulk to avoid sequentially programming each. Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 206 ++++++++++++++-- orchagent/muxorch.h | 28 ++- orchagent/neighorch.cpp | 519 ++++++++++++++++++++++++++++++++++++++++ orchagent/neighorch.h | 23 ++ tests/test_mux.py | 61 +++++ 5 files changed, 817 insertions(+), 20 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index ea3ade347c..5c529a309d 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -744,6 +744,8 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; + std::list bulk_neigh_ctx_list; + std::list bulk_route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -751,12 +753,18 @@ bool MuxNbrHandler::enable(bool update_rt) SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str()); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->enableNeighbor(neigh)) - { - SWSS_LOG_INFO("Enabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + bulk_neigh_ctx_list.push_back(NeighborBulkContext(neigh, true)); + it++; + } + + if (!gNeighOrch->createBulkNeighborEntries(bulk_neigh_ctx_list) || !gNeighOrch->flushBulkNeighborEntries(bulk_neigh_ctx_list)) + { + return false; + } + it = neighbors_.begin(); + while (it != neighbors_.end()) + { /* Update NH to point to learned neighbor */ it->second = gNeighOrch->getLocalNextHopId(neigh); @@ -795,22 +803,48 @@ bool MuxNbrHandler::enable(bool update_rt) IpPrefix pfx = it->first.to_string(); if (update_rt) { - if (remove_route(pfx) != SAI_STATUS_SUCCESS) - { - return false; - } - updateTunnelRoute(nh_key, false); + bulk_route_ctx_list.push_back(MuxRouteBulkContext(pfx, false)); } it++; } + if (update_rt) + { + if (!createBulkRouteEntries(bulk_route_ctx_list)) + { + gRouteBulker.clear(); + return false; + } + gRouteBulker.flush(); + if (!processBulkRouteEntries(bulk_route_ctx_list)) + { + gRouteBulker.clear(); + return false; + } + + it = neighbors_.begin(); + while (it != neighbors_.end()) + { + NextHopKey nh_key = NextHopKey(it->first, alias_); + if (update_rt) + { + updateTunnelRoute(nh_key, false); + } + + it++; + } + } + + gRouteBulker.clear(); return true; } bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; + std::list bulk_neigh_ctx_list; + std::list bulk_route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -852,21 +886,32 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); - if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) - { - return false; - } + bulk_route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second, true)); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->disableNeighbor(neigh)) - { - SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + bulk_neigh_ctx_list.push_back(NeighborBulkContext(neigh, false)); it++; 
} + if (!createBulkRouteEntries(bulk_route_ctx_list)) + { + gRouteBulker.clear(); + return false; + } + gRouteBulker.flush(); + if (!processBulkRouteEntries(bulk_route_ctx_list)) + { + gRouteBulker.clear(); + return false; + } + + if (!gNeighOrch->createBulkNeighborEntries(bulk_neigh_ctx_list) || !gNeighOrch->flushBulkNeighborEntries(bulk_neigh_ctx_list)) + { + return false; + } + + gRouteBulker.clear(); return true; } @@ -881,6 +926,129 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } +bool MuxNbrHandler::createBulkRouteEntries(std::list& bulk_ctx_list) +{ + int count = 0; + + SWSS_LOG_INFO("Creating %d bulk route entries", (int)bulk_ctx_list.size()); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Creating route entry %s, nh %" PRIx64 ", add:%d", ctx->pfx.getIp().to_string().c_str(), ctx->nh, ctx->add); + + object_statuses.emplace_back(); + if (ctx->add) + { + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); + + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = ctx->nh; + attrs.push_back(attr); + + sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_ERROR("Failed to create add entry for tunnel route in bulker object, entry already exists %s,nh %" PRIx64, + ctx->pfx.getIp().to_string().c_str(), ctx->nh); + continue; + } + } + else + { + sai_status_t status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_ERROR("Failed to create remove entry for tunnel route in bulker object, entry already exists %s,nh %" PRIx64, + ctx->pfx.getIp().to_string().c_str(), ctx->nh); + continue; + } + } + count++; + } + + SWSS_LOG_INFO("Successfully created %d bulk neighbor entries", count); + return true; +} + +bool MuxNbrHandler::processBulkRouteEntries(std::list& bulk_ctx_list) +{ + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (ctx->add) + { + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_NOTICE("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", + ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); + return false; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + else + { + if (status != SAI_STATUS_SUCCESS) + { 
+ if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_NOTICE("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", + ctx->pfx.getIp().to_string().c_str(), status); + return false; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); + return status; + } + } + return true; +} + void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) { MuxOrch* mux_orch = gDirectory.get(); diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 22f01ce27d..4f4ab8117c 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -10,6 +10,7 @@ #include "tunneldecaporch.h" #include "aclorch.h" #include "neighorch.h" +#include "bulker.h" enum MuxState { @@ -35,6 +36,27 @@ enum MuxCableType ACTIVE_ACTIVE }; +struct MuxRouteBulkContext +{ + std::deque object_statuses; // Bulk statuses + IpPrefix pfx; // Route prefix + sai_object_id_t nh; // nexthop id + bool add; // add route bool + + MuxRouteBulkContext(IpPrefix pfx, bool add) + : pfx(pfx), add(add) + { + } + + MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh, bool add) + : pfx(pfx), nh(nh), add(add) + { + } +}; + +extern size_t gMaxBulkSize; +extern sai_route_api_t* sai_route_api; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -64,7 +86,7 @@ typedef std::map MuxNeighbor; class MuxNbrHandler { public: - MuxNbrHandler() = default; + MuxNbrHandler() : gRouteBulker(sai_route_api, gMaxBulkSize) {}; bool enable(bool update_rt); bool disable(sai_object_id_t); @@ -75,11 +97,15 @@ class MuxNbrHandler string getAlias() const { return alias_; }; private: + bool createBulkRouteEntries(std::list& bulk_ctx_list); + bool processBulkRouteEntries(std::list& bulk_ctx_list); + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; string alias_; + EntityBulker gRouteBulker; }; // Mux Cable object diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index a2bdebbc62..07e21b0400 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -22,10 +22,12 @@ extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern BfdOrch *gBfdOrch; +extern size_t gMaxBulkSize; const int neighorch_pri = 30; NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, FdbOrch *fdbOrch, PortsOrch *portsOrch, DBConnector *chassisAppDb) : + gNeighBulker(sai_neighbor_api, gMaxBulkSize), Orch(appDb, tableName, neighorch_pri), m_intfsOrch(intfsOrch), m_fdbOrch(fdbOrch), @@ -1199,6 +1201,434 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) return true; } +/** + * @brief Creates a neighbor add entry and adds it to bulker. + * @param ctx NeighborBulkContext contains neighbor information and list of object statuses. 
+ */ +bool NeighOrch::addBulkNeighbor(NeighborBulkContext& ctx) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + if (rif_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); + return false; + } + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + vector neighbor_attrs; + sai_attribute_t neighbor_attr; + + neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS; + memcpy(neighbor_attr.value.mac, macAddress.getMac(), 6); + neighbor_attrs.push_back(neighbor_attr); + + if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) + { + /* Check if this prefix is a configured ip, if not allow */ + IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); + if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) + { + neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; + neighbor_attr.value.booldata = 1; + neighbor_attrs.push_back(neighbor_attr); + } + } + + PortsOrch* ports_orch = gDirectory.get(); + auto vlan_ports = ports_orch->getAllVlans(); + + for (auto vlan_port: vlan_ports) + { + if (vlan_port == alias) + { + continue; + } + NeighborEntry temp_entry = { ip_address, vlan_port }; + if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) + { + SWSS_LOG_NOTICE("Neighbor %s on %s already exists, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); + if (!removeNeighbor(temp_entry)) + { + SWSS_LOG_ERROR("Failed to create remove neighbor entry %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + return false; + } + } + } + + if (gMySwitchType == "voq") + { + if (!addVoqEncapIndex(alias, ip_address, neighbor_attrs)) + { + return false; + } + } + + bool hw_config = isHwConfigured(neighborEntry); + MuxOrch* mux_orch = gDirectory.get(); + if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) + { + object_statuses.emplace_back(); + status = gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("Neighbor add entry %s already exists in bulker.", ip_address.to_string().c_str()); + return true; + } + } + else if (hw_config) + { + ctx.set_neigh_attr_count = (int)neighbor_attrs.size(); + for (int i = 0; i < ctx.set_neigh_attr_count; i++) + { + object_statuses.emplace_back(); + gNeighBulker.set_entry_attribute(&object_statuses.back(), &neighbor_entry, neighbor_attrs.data()); + } + } + + return true; +} + +/** + * @brief Checks statuses of bulker add operations. 
+ * @param ctx NeighborBulkContext contains NeighborEntry and status list + */ +bool NeighOrch::addBulkNeighborPost(NeighborBulkContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + SWSS_LOG_INFO("Checking neighbor entry %s on %s status.", ip_address.to_string().c_str(), alias.c_str()); + + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + if (rif_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); + return false; + } + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + bool hw_config = isHwConfigured(neighborEntry); + MuxOrch* mux_orch = gDirectory.get(); + if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) + { + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_ERROR("Neighbor exists: neighbor %s on %s, skipping: status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + /* Returning True so as to skip retry */ + return true; + } + else + { + SWSS_LOG_ERROR("Failed to create neighbor %s on %s, status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + SWSS_LOG_NOTICE("Created neighbor ip %s, %s on %s", ip_address.to_string().c_str(), + macAddress.to_string().c_str(), alias.c_str()); + + m_intfsOrch->increaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + if (!addNextHop(NextHopKey(ip_address, alias))) + { + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + return false; + } + hw_config = true; + } + else if (hw_config) + { + for (int i = 0; i < ctx.set_neigh_attr_count; i++) + { + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Bulker failed to update neighbor %s on %s, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + SWSS_LOG_NOTICE("Bulker updated 
neighbor %s on %s", macAddress.to_string().c_str(), alias.c_str()); + } + + m_syncdNeighbors[neighborEntry] = { macAddress, hw_config }; + + NeighborUpdate update = { neighborEntry, macAddress, true }; + notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + if(gMySwitchType == "voq") + { + //Sync the neighbor to add to the CHASSIS_APP_DB + voqSyncAddNeigh(alias, ip_address, macAddress, neighbor_entry); + } + + return true; +} + +/** + * @brief Creates a neighbor remove entry and adds it to bulker. + * @param ctx NeighborBulkContext contains neighbor information and list of object statuses. + */ +bool NeighOrch::removeBulkNeighbor(NeighborBulkContext& ctx) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + NextHopKey nexthop = { ip_address, alias }; + if(m_intfsOrch->isRemoteSystemPortIntf(alias)) + { + //For remote system ports kernel nexthops are always on inband. Change the key + Port inbp; + gPortsOrch->getInbandPort(inbp); + assert(inbp.m_alias.length()); + + nexthop.alias = inbp.m_alias; + } + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + return true; + } + + if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end() && m_syncdNextHops[nexthop].ref_count > 0) + { + SWSS_LOG_INFO("Failed to remove still referenced neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + return false; + } + + if (isHwConfigured(neighborEntry)) + { + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + sai_object_id_t next_hop_id = m_syncdNextHops[nexthop].next_hop_id; + status = sai_next_hop_api->remove_next_hop(next_hop_id); + if (status != SAI_STATUS_SUCCESS) + { + /* When next hop is not found, we continue to remove neighbor entry. */ + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Next hop %s on %s doesn't exist, rv:%d", + ip_address.to_string().c_str(), alias.c_str(), status); + } + else + { + SWSS_LOG_ERROR("Failed to remove next hop %s on %s, rv:%d", + ip_address.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEXT_HOP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (status != SAI_STATUS_ITEM_NOT_FOUND) + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEXTHOP); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEXTHOP); + } + } + + SWSS_LOG_NOTICE("Removed next hop %s on %s", + ip_address.to_string().c_str(), alias.c_str()); + + object_statuses.emplace_back(); + status = gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s: already exists in bulker", ip_address.to_string().c_str()); + return false; + } + } + + return true; +} + +/** + * @brief Checks statuses of bulker remove operations. 
+ * @param ctx NeighborBulkContext contains NeighborEntry and status list + */ +bool NeighOrch::removeBulkNeighborPost(NeighborBulkContext& ctx, bool disable) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + return true; + } + + SWSS_LOG_NOTICE("Removing neighbor %s on %s", ip_address.to_string().c_str(), + m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); + + if (object_statuses.empty()) + { + return true; + } + + if (isHwConfigured(neighborEntry)) + { + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Bulker skipped, neighbor %s on %s already removed, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + } + else + { + SWSS_LOG_ERROR("Bulker failed to remove neighbor %s on %s, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + else + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } + } + + + /* Do not delete entry from cache if its disable request */ + if (disable) + { + m_syncdNeighbors[neighborEntry].hw_configured = false; + return true; + } + + m_syncdNeighbors.erase(neighborEntry); + + NeighborUpdate update = { neighborEntry, MacAddress(), false }; + notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + if(gMySwitchType == "voq") + { + //Sync the neighbor to delete from the CHASSIS_APP_DB + voqSyncDelNeigh(alias, ip_address); + } + + return true; +} + bool NeighOrch::isHwConfigured(const NeighborEntry& neighborEntry) { if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) @@ -1247,6 +1677,95 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return removeNeighbor(neighborEntry, true); } +/** + * @brief Enters neighbor entries into neighbor bulker. + * @param bulk_ctx_list List of NeighborBulkContext entries to add to bulker. 
+ */ +bool NeighOrch::createBulkNeighborEntries(std::list& bulk_ctx_list) +{ + int count = 0; + + SWSS_LOG_INFO("Creating %d bulk neighbor entries", (int)bulk_ctx_list.size()); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + if (ctx->enable && isHwConfigured(neighborEntry)) + { + SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + if (ctx->enable) + { + SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!addBulkNeighbor(*ctx)) + { + SWSS_LOG_INFO("Adding bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); + return false; + } + } + else + { + SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!removeBulkNeighbor(*ctx)) + { + SWSS_LOG_INFO("Removing bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); + return false; + } + } + count++; + } + SWSS_LOG_INFO("Successfully created %d bulk neighbor entries", count); + return true; +} + +/** + * @brief Processes neighbor entries in bulker. + * @param bulk_ctx_list List of neighbor context entries to be processed. + */ +bool NeighOrch::flushBulkNeighborEntries(std::list& bulk_ctx_list) +{ + SWSS_LOG_INFO("Processing %d bulk add neighbor entries", (int)bulk_ctx_list.size()); + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (ctx->enable) + { + if (!addBulkNeighborPost(*ctx)) + { + SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + gNeighBulker.clear(); + return false; + } + } + else + { + if (!removeBulkNeighborPost(*ctx, true)) + { + gNeighBulker.clear(); + return false; + } + } + } + + SWSS_LOG_INFO("Succeeded in processing %d bulk add neighbor entries", (int)bulk_ctx_list.size()); + gNeighBulker.clear(); + return true; +} + sai_object_id_t NeighOrch::addTunnelNextHop(const NextHopKey& nh) { SWSS_LOG_ENTER(); diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index e72979ad07..1152b74b85 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -12,6 +12,7 @@ #include "producerstatetable.h" #include "schema.h" #include "bfdorch.h" +#include "bulker.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -43,6 +44,20 @@ struct NeighborUpdate bool add; }; +struct NeighborBulkContext +{ + std::deque object_statuses; // Bulk statuses + NeighborEntry neighborEntry; // Neighbor entry to process + MacAddress mac; // neighbor mac + bool enable; // enable/disable + int set_neigh_attr_count = 0; // Keeps track of number of attr set + + NeighborBulkContext(NeighborEntry neighborEntry, bool enable) + : neighborEntry(neighborEntry), enable(enable) + { + } +}; + class NeighOrch : public Orch, public Subject, public Observer { public: @@ -66,6 +81,8 @@ class NeighOrch : public Orch, public Subject, public Observer bool enableNeighbor(const NeighborEntry&); bool disableNeighbor(const NeighborEntry&); + bool createBulkNeighborEntries(std::list&); + bool flushBulkNeighborEntries(std::list&); bool isHwConfigured(const 
NeighborEntry&); sai_object_id_t addTunnelNextHop(const NextHopKey&); @@ -93,10 +110,16 @@ class NeighOrch : public Orch, public Subject, public Observer std::set m_neighborToResolve; + EntityBulker gNeighBulker; + bool removeNextHop(const IpAddress&, const string&); bool addNeighbor(const NeighborEntry&, const MacAddress&); bool removeNeighbor(const NeighborEntry&, bool disable = false); + bool addBulkNeighbor(NeighborBulkContext& ctx); + bool addBulkNeighborPost(NeighborBulkContext& ctx); + bool removeBulkNeighbor(NeighborBulkContext& ctx); + bool removeBulkNeighborPost(NeighborBulkContext& ctx, bool disable = false); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/tests/test_mux.py b/tests/test_mux.py index 207ec6741b..c81565e022 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -100,6 +100,8 @@ class TestMuxTunnelBase(): DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + BULK_NEIGHBOR_COUNT = 254 + def check_syslog(self, dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) @@ -336,8 +338,67 @@ def del_route(self, dvs, route): ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) ps._del(route) + def wait_for_mux_state(self, dvs, interface, expected_state): + """ + Waits until state change completes - expected state is in state_db + """ + + apdb = dvs.get_app_db() + expected_field = {"state": expected_state} + apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field) + + def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route): + dvs.runcmd("swssloglevel -l INFO -c orchagent") + dvs.runcmd("ip neigh flush all") + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11") + self.set_mux_state(appdb, "Ethernet0", "active") + + class neighbor_info: + ipv4_key = "" + ipv6_key = "" + ipv4 = "" + ipv6 = "" + + def __init__(self, i): + self.ipv4 = "192.168.1." 
+ str(i) + self.ipv6 = "fc02:1001::" + str(i) + + neighbor_list = [neighbor_info(i) for i in range(100, self.BULK_NEIGHBOR_COUNT)] + for neigh_info in neighbor_list: + self.add_neighbor(dvs, neigh_info.ipv4, "00:00:00:00:11:11") + self.add_neighbor(dvs, neigh_info.ipv6, "00:00:00:00:11:11") + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + try: + self.set_mux_state(appdb, "Ethernet0", "standby") + self.wait_for_mux_state(dvs, "Ethernet0", "standby") + + for neigh_info in neighbor_list: + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv4_key) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv6_key) + dvs_route.check_asicdb_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + + self.set_mux_state(appdb, "Ethernet0", "active") + self.wait_for_mux_state(dvs, "Ethernet0", "active") + + for neigh_info in neighbor_list: + dvs_route.check_asicdb_deleted_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + finally: + for neigh_info in neighbor_list: + self.del_neighbor(dvs, neigh_info.ipv4) + self.del_neighbor(dvs, neigh_info.ipv6) + def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): + self.bulk_neighbor_test(confdb, appdb, asicdb, dvs, dvs_route) self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "standby") From 11e5ccebf2aed4ad1e932272a1886103c1716286 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Thu, 23 May 2024 06:02:03 +0000 Subject: [PATCH 2/9] fixing aclorch_ut Signed-off-by: Nikola Dancejic --- tests/mock_tests/aclorch_ut.cpp | 3 +++ tests/mock_tests/bulker_ut.cpp | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 8005199935..4a92d65c80 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -24,6 +24,7 @@ extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; extern sai_route_api_t *sai_route_api; +extern sai_route_api_t *sai_neighbor_api; extern sai_mpls_api_t *sai_mpls_api; extern sai_next_hop_group_api_t* sai_next_hop_group_api; extern string gMySwitchType; @@ -318,6 +319,7 @@ namespace aclorch_test sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); sai_api_query(SAI_API_ROUTE, (void **)&sai_route_api); + sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); sai_api_query(SAI_API_MPLS, (void **)&sai_mpls_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); @@ -490,6 +492,7 @@ namespace aclorch_test sai_vlan_api = nullptr; sai_bridge_api = nullptr; sai_route_api = nullptr; + sai_neighbor_api = nullptr; sai_mpls_api = nullptr; } diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index 6210cc0969..dc5ad78776 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -2,6 +2,7 @@ #include "bulker.h" extern sai_route_api_t *sai_route_api; +extern sai_neighbor_api_t *sai_neighbor_api; namespace bulker_test { @@ -17,12 +18,18 @@ namespace bulker_test { ASSERT_EQ(sai_route_api, 
nullptr); sai_route_api = new sai_route_api_t(); + + ASSERT_EQ(sai_neighbor_api, nullptr); + sai_neighbor_api = new sai_neighbor_api_t(); } void TearDown() override { delete sai_route_api; sai_route_api = nullptr; + + delete sai_neighbor_api; + sai_neighbor_api = nullptr; } }; @@ -142,4 +149,28 @@ namespace bulker_test // Confirm route entry is not pending removal ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); } + + TEST_F(BulkerTest, NeighborBulker) + { + // Create bulker + EntityBulker gNeighBulker(sai_neighbor_api, 1000); + deque object_statuses; + + // Check max bulk size + ASSERT_EQ(gNeighBulker.max_bulk_size, 1000); + + // Create a dummy neighbor entry + sai_neighbor_entry_t neighbor_entry_remove; + neighbor_entry_remove.ip_address.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + neighbor_entry_remove.ip_address.addr.ip4 = 0x10000001; + neighbor_entry_remove.rif_id = 0x0; + neighbor_entry_remove.switch_id = 0x0; + + // Put neighbor entry into remove + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry_remove); + + // Confirm neighbor entry is pending removal + ASSERT_TRUE(gNeighBulker.bulk_entry_pending_removal(neighbor_entry_remove)); + } } From 2b3240b4b6368768cfb6a0927321f4a534d2bbd3 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Thu, 23 May 2024 09:14:29 +0000 Subject: [PATCH 3/9] skip MuxRollbackTests until create_neighbor_entries api is supported Signed-off-by: Nikola Dancejic --- tests/mock_tests/mux_rollback_ut.cpp | 221 ++++++++++++++------------- 1 file changed, 111 insertions(+), 110 deletions(-) diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index 933c27aca2..a5441fe4f7 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -133,114 +133,115 @@ namespace mux_rollback_test } }; - TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) - { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(ACTIVE_STATE); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(STANDBY_STATE); - } - - TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) - { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(ACTIVE_STATE); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(STANDBY_STATE); - } - - TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) - { - EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(ACTIVE_STATE); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(STANDBY_STATE); - } - - TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) - { - EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(ACTIVE_STATE); - } - - TEST_F(MuxRollbackTest, 
ActiveToStandbyNextHopNotFound) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(STANDBY_STATE); - } - - TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) - { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - .WillOnce(Throw(runtime_error("Mock runtime error"))); - SetMuxStateFromAppDb(ACTIVE_STATE); - EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) - .WillOnce(Throw(runtime_error("Mock runtime error"))); - SetMuxStateFromAppDb(STANDBY_STATE); - EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - } - - TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) - { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - .WillOnce(Throw(logic_error("Mock logic error"))); - SetMuxStateFromAppDb(ACTIVE_STATE); - EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - .WillOnce(Throw(logic_error("Mock logic error"))); - SetMuxStateFromAppDb(STANDBY_STATE); - EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - } - - TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) - { - EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) - .WillOnce(Throw(exception())); - SetMuxStateFromAppDb(ACTIVE_STATE); - EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - } - - TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) - { - SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) - .WillOnce(Throw(exception())); - SetMuxStateFromAppDb(STANDBY_STATE); - EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - } + /* TODO: change expected api call to be create_neighbor_entries */ + // TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) + // { + // EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + // SetAndAssertMuxState(ACTIVE_STATE); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + // SetAndAssertMuxState(STANDBY_STATE); + // } + + // TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) + // { + // EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + // SetAndAssertMuxState(ACTIVE_STATE); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_route_api, create_route_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + // SetAndAssertMuxState(STANDBY_STATE); + // } + + // TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) + // { + // EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + // SetAndAssertMuxState(ACTIVE_STATE); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) + // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + // SetAndAssertMuxState(STANDBY_STATE); + 
// } + + // TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) + // { + // EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + // SetAndAssertMuxState(ACTIVE_STATE); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + // SetAndAssertMuxState(STANDBY_STATE); + // } + + // TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) + // { + // EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + // .WillOnce(Throw(runtime_error("Mock runtime error"))); + // SetMuxStateFromAppDb(ACTIVE_STATE); + // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_route_api, create_route_entry) + // .WillOnce(Throw(runtime_error("Mock runtime error"))); + // SetMuxStateFromAppDb(STANDBY_STATE); + // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + // } + + // TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) + // { + // EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + // .WillOnce(Throw(logic_error("Mock logic error"))); + // SetMuxStateFromAppDb(ACTIVE_STATE); + // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + // .WillOnce(Throw(logic_error("Mock logic error"))); + // SetMuxStateFromAppDb(STANDBY_STATE); + // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + // } + + // TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) + // { + // EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + // .WillOnce(Throw(exception())); + // SetMuxStateFromAppDb(ACTIVE_STATE); + // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + // } + + // TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) + // { + // SetAndAssertMuxState(ACTIVE_STATE); + // EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + // .WillOnce(Throw(exception())); + // SetMuxStateFromAppDb(STANDBY_STATE); + // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + // } } From f259759537903a20eec5eb5b3c331b12747cd68b Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Sat, 25 May 2024 01:32:24 +0000 Subject: [PATCH 4/9] [mux_rollback_ut.cpp] fixing mux_rollback_ut tests for bulk switchover bulk switchover feature chages the sai_api calls in switchover flow. Because of this mux_rollback_ut tests fail. This PR adds bulk operations to mock_sai_neighbor and mock_sai_api, and adjusts the expected function calls in mux_rollback_ut. 
Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 3 +- orchagent/p4orch/tests/mock_sai_neighbor.h | 18 ++ .../p4orch/tests/neighbor_manager_test.cpp | 2 + tests/mock_tests/mock_sai_api.h | 54 +++- tests/mock_tests/mux_rollback_ut.cpp | 243 ++++++++++-------- 5 files changed, 193 insertions(+), 127 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 5c529a309d..3a64c20416 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -978,7 +978,7 @@ bool MuxNbrHandler::createBulkRouteEntries(std::list& bulk_ count++; } - SWSS_LOG_INFO("Successfully created %d bulk neighbor entries", count); + SWSS_LOG_INFO("Successfully created %d bulk route entries", count); return true; } @@ -1043,7 +1043,6 @@ bool MuxNbrHandler::processBulkRouteEntries(std::list& bulk } SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); - return status; } } return true; diff --git a/orchagent/p4orch/tests/mock_sai_neighbor.h b/orchagent/p4orch/tests/mock_sai_neighbor.h index cd8f2aa0a9..4355831d36 100644 --- a/orchagent/p4orch/tests/mock_sai_neighbor.h +++ b/orchagent/p4orch/tests/mock_sai_neighbor.h @@ -16,6 +16,12 @@ class MockSaiNeighbor MOCK_METHOD1(remove_neighbor_entry, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry)); + MOCK_METHOD6(create_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses)); + MOCK_METHOD2(set_neighbor_entry_attribute, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr)); @@ -37,6 +43,18 @@ sai_status_t mock_remove_neighbor_entry(_In_ const sai_neighbor_entry_t *neighbo return mock_sai_neighbor->remove_neighbor_entry(neighbor_entry); } +sai_status_t mock_create_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->create_neighbor_entries(object_count, neighbor_entry, attr_count, attr_list, mode, object_statuses); +} + +sai_status_t mock_remove_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->remove_neighbor_entries(object_count, neighbor_entry, mode, object_statuses); +} + sai_status_t mock_set_neighbor_entry_attribute(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr) { diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index 4db1db873e..7523701cb7 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -124,6 +124,8 @@ class NeighborManagerTest : public ::testing::Test mock_sai_neighbor = &mock_sai_neighbor_; sai_neighbor_api->create_neighbor_entry = mock_create_neighbor_entry; sai_neighbor_api->remove_neighbor_entry = mock_remove_neighbor_entry; + sai_neighbor_api->create_neighbor_entries = mock_create_neighbor_entries; + 
sai_neighbor_api->remove_neighbor_entries = mock_remove_neighbor_entries; sai_neighbor_api->set_neighbor_entry_attribute = mock_set_neighbor_entry_attribute; sai_neighbor_api->get_neighbor_entry_attribute = mock_get_neighbor_entry_attribute; } diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h index 7819b5b126..58e3c9da23 100644 --- a/tests/mock_tests/mock_sai_api.h +++ b/tests/mock_tests/mock_sai_api.h @@ -24,8 +24,12 @@ EXTERN_MOCK_FNS #define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry +#define CREATE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses +#define REMOVE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ sai_bulk_op_error_mode_t mode, _In_ sai_status_t *object_statuses #define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list #define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define CREATE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, attr_count, attr_list, mode, object_statuses +#define REMOVE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, mode, object_statuses #define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id #define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list @@ -42,8 +46,8 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the 7. 
Define a method to remove the mock */ #define DEFINE_SAI_API_MOCK(sai_object_type) \ - static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ - static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ class mock_sai_##sai_object_type##_api_t \ { \ public: \ @@ -59,20 +63,40 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the [this](REMOVE_PARAMS(sai_object_type)) { \ return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ }); \ + ON_CALL(*this, create_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](CREATE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](REMOVE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + }); \ } \ MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ + MOCK_METHOD6(create_##sai_object_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_object_type))); \ + MOCK_METHOD4(remove_##sai_object_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ - inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ + inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_object_type##_api_mock() \ + inline sai_status_t mock_create_##sai_object_type##_entries(CREATE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##_entries(REMOVE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_object_type##_api_mock() \ { \ mock_sai_##sai_object_type##_api = new NiceMock(); \ \ @@ -82,16 +106,18 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the \ sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ + sai_##sai_object_type##_api->create_##sai_object_type##_entries = mock_create_##sai_object_type##_entries; \ + sai_##sai_object_type##_api->remove_##sai_object_type##_entries = mock_remove_##sai_object_type##_entries; \ } \ - inline void 
remove_sai_##sai_object_type##_api_mock() \ + inline void remove_sai_##sai_object_type##_api_mock() \ { \ sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ delete mock_sai_##sai_object_type##_api; \ } #define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ - static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ - static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ class mock_sai_##sai_api_name##_api_t \ { \ public: \ @@ -111,16 +137,16 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ - inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_api_name##_api_mock() \ + inline void apply_sai_##sai_api_name##_api_mock() \ { \ mock_sai_##sai_api_name##_api = new NiceMock(); \ \ @@ -131,7 +157,7 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ } \ - inline void remove_sai_##sai_api_name##_api_mock() \ + inline void remove_sai_##sai_api_name##_api_mock() \ { \ sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ delete mock_sai_##sai_api_name##_api; \ diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index a5441fe4f7..1d9855a5c4 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -5,6 +5,10 @@ #include "orch.h" #undef protected #include "ut_helper.h" +#define private public +#include "neighorch.h" +#include "muxorch.h" +#undef private #include "mock_orchagent_main.h" #include "mock_sai_api.h" #include "mock_orch_test.h" @@ -19,6 +23,7 @@ namespace mux_rollback_test DEFINE_SAI_API_MOCK(route); DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + using ::testing::_; using namespace std; using namespace mock_orch_test; using ::testing::Return; @@ -26,6 +31,11 @@ namespace mux_rollback_test static const string TEST_INTERFACE = "Ethernet4"; + sai_bulk_create_neighbor_entry_fn old_create_neighbor_entries; + sai_bulk_remove_neighbor_entry_fn old_remove_neighbor_entries; + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + class MuxRollbackTest : public MockOrchTest { protected: @@ -125,123 +135,134 @@ namespace mux_rollback_test INIT_SAI_API_MOCK(acl); INIT_SAI_API_MOCK(next_hop); 
MockSaiApis(); + old_create_neighbor_entries = gNeighOrch->gNeighBulker.create_entries; + old_remove_neighbor_entries = gNeighOrch->gNeighBulker.remove_entries; + old_create_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.create_entries; + old_remove_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.remove_entries; + gNeighOrch->gNeighBulker.create_entries = mock_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = mock_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = mock_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = mock_remove_route_entries; } void PreTearDown() override { RestoreSaiApis(); + gNeighOrch->gNeighBulker.create_entries = old_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = old_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = old_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = old_remove_route_entries; } }; - /* TODO: change expected api call to be create_neighbor_entries */ - // TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) - // { - // EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - // SetAndAssertMuxState(ACTIVE_STATE); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - // SetAndAssertMuxState(STANDBY_STATE); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) - // { - // EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - // SetAndAssertMuxState(ACTIVE_STATE); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_route_api, create_route_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - // SetAndAssertMuxState(STANDBY_STATE); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) - // { - // EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - // SetAndAssertMuxState(ACTIVE_STATE); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) - // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - // SetAndAssertMuxState(STANDBY_STATE); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) - // { - // EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) - // .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - // SetAndAssertMuxState(ACTIVE_STATE); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) - // .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - // SetAndAssertMuxState(STANDBY_STATE); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) - // { - // EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - // .WillOnce(Throw(runtime_error("Mock runtime error"))); - // SetMuxStateFromAppDb(ACTIVE_STATE); - // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // 
EXPECT_CALL(*mock_sai_route_api, create_route_entry) - // .WillOnce(Throw(runtime_error("Mock runtime error"))); - // SetMuxStateFromAppDb(STANDBY_STATE); - // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) - // { - // EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - // .WillOnce(Throw(logic_error("Mock logic error"))); - // SetMuxStateFromAppDb(ACTIVE_STATE); - // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - // .WillOnce(Throw(logic_error("Mock logic error"))); - // SetMuxStateFromAppDb(STANDBY_STATE); - // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - // } - - // TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) - // { - // EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) - // .WillOnce(Throw(exception())); - // SetMuxStateFromAppDb(ACTIVE_STATE); - // EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); - // } - - // TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) - // { - // SetAndAssertMuxState(ACTIVE_STATE); - // EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) - // .WillOnce(Throw(exception())); - // SetMuxStateFromAppDb(STANDBY_STATE); - // EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); - // } + TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) + { + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + 
SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } } From e318676623d3486a566a9fae28a6ad001588c86b Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Fri, 31 May 2024 09:44:41 +0000 Subject: [PATCH 5/9] [muxorch] Removing INFO logging from tests and ensuring neigh is set Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 1 + tests/test_mux.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 3a64c20416..4cd0e7dbe7 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -766,6 +766,7 @@ bool MuxNbrHandler::enable(bool update_rt) while (it != neighbors_.end()) { /* Update NH to point to learned neighbor */ + neigh = NeighborEntry(it->first, alias_); it->second = gNeighOrch->getLocalNextHopId(neigh); /* Reprogram route */ diff --git a/tests/test_mux.py b/tests/test_mux.py index 94e4e306e6..fce1b4f37c 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -349,7 +349,6 @@ def wait_for_mux_state(self, dvs, interface, expected_state): apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field) def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route): - dvs.runcmd("swssloglevel -l INFO -c orchagent") dvs.runcmd("ip neigh flush all") self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11") self.set_mux_state(appdb, "Ethernet0", "active") From 411da0f271ad0805a02875a01bfa9c28b5517ab6 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Mon, 3 Jun 2024 19:33:35 +0000 Subject: [PATCH 6/9] [muxorch] Refactoring bulk switchover - using existing add/removeNeighbor functions to do bulker operations. - Created addNeighborPost and removeNeighborPost - Created addRoutes and removeRoutes to deal with bulk operations - Created enableNeighbors and disableNeighbors to handle bulk switchover. 
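To recap the flow these bullets describe (illustrative sketch only, not part of the diff): MuxNbrHandler builds a list of NeighborContext objects with bulk_op enabled and hands the whole list to NeighOrch, which stages every SAI operation in gNeighBulker, flushes once, and then walks the per-entry statuses in the post-processing helpers. The condensed fragment below mirrors the pattern used in MuxNbrHandler::enable() in this patch; the surrounding members (neighbors_, alias_) and the gNeighOrch global are the ones defined elsewhere in the series, and the snippet assumes that context rather than standing alone.

    // Sketch of the bulk enable path, condensed from MuxNbrHandler::enable().
    std::list<NeighborContext> neigh_ctx_list;

    for (auto it = neighbors_.begin(); it != neighbors_.end(); ++it)
    {
        NeighborEntry neigh(it->first, alias_);
        // bulk_op = true: stage the entry in gNeighBulker instead of issuing
        // one SAI call per neighbor.
        neigh_ctx_list.push_back(NeighborContext(neigh, true));
    }

    // enableNeighbors() stages all creates in the bulker, flushes once, then
    // checks each per-entry status in the bulk post-processing helper.
    if (!gNeighOrch->enableNeighbors(neigh_ctx_list))
    {
        return false;
    }

The disable path is symmetric: the same contexts are passed to gNeighOrch->disableNeighbors(), and route programming follows the analogous addRoutes()/removeRoutes() pattern against gRouteBulker.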
Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 231 ++++++++------- orchagent/muxorch.h | 13 +- orchagent/neighorch.cpp | 421 ++++++++++----------------- orchagent/neighorch.h | 37 ++- orchagent/p4orch/tests/mock_bulker.h | 49 ++++ 5 files changed, 354 insertions(+), 397 deletions(-) create mode 100644 orchagent/p4orch/tests/mock_bulker.h diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 4cd0e7dbe7..aa4aea2f87 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -744,8 +744,8 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; - std::list bulk_neigh_ctx_list; - std::list bulk_route_ctx_list; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -753,11 +753,12 @@ bool MuxNbrHandler::enable(bool update_rt) SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str()); neigh = NeighborEntry(it->first, alias_); - bulk_neigh_ctx_list.push_back(NeighborBulkContext(neigh, true)); + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); it++; } - if (!gNeighOrch->createBulkNeighborEntries(bulk_neigh_ctx_list) || !gNeighOrch->flushBulkNeighborEntries(bulk_neigh_ctx_list)) + if (!gNeighOrch->enableNeighbors(neigh_ctx_list)) { return false; } @@ -804,7 +805,7 @@ bool MuxNbrHandler::enable(bool update_rt) IpPrefix pfx = it->first.to_string(); if (update_rt) { - bulk_route_ctx_list.push_back(MuxRouteBulkContext(pfx, false)); + route_ctx_list.push_back(MuxRouteBulkContext(pfx)); } it++; @@ -812,15 +813,8 @@ bool MuxNbrHandler::enable(bool update_rt) if (update_rt) { - if (!createBulkRouteEntries(bulk_route_ctx_list)) + if (!removeRoutes(route_ctx_list)) { - gRouteBulker.clear(); - return false; - } - gRouteBulker.flush(); - if (!processBulkRouteEntries(bulk_route_ctx_list)) - { - gRouteBulker.clear(); return false; } @@ -844,8 +838,8 @@ bool MuxNbrHandler::enable(bool update_rt) bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; - std::list bulk_neigh_ctx_list; - std::list bulk_route_ctx_list; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -887,32 +881,25 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); - bulk_route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second, true)); + route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second)); neigh = NeighborEntry(it->first, alias_); - bulk_neigh_ctx_list.push_back(NeighborBulkContext(neigh, false)); + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); it++; } - if (!createBulkRouteEntries(bulk_route_ctx_list)) - { - gRouteBulker.clear(); - return false; - } - gRouteBulker.flush(); - if (!processBulkRouteEntries(bulk_route_ctx_list)) + if (!addRoutes(route_ctx_list)) { - gRouteBulker.clear(); return false; } - if (!gNeighOrch->createBulkNeighborEntries(bulk_neigh_ctx_list) || !gNeighOrch->flushBulkNeighborEntries(bulk_neigh_ctx_list)) + if (!gNeighOrch->disableNeighbors(neigh_ctx_list)) { return false; } - gRouteBulker.clear(); return true; } @@ -927,13 +914,13 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } -bool MuxNbrHandler::createBulkRouteEntries(std::list& bulk_ctx_list) +bool 
MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) { - int count = 0; - - SWSS_LOG_INFO("Creating %d bulk route entries", (int)bulk_ctx_list.size()); + sai_status_t status; + bool ret = true; - for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + auto ctx = bulk_ctx_list.begin(); + while (ctx != bulk_ctx_list.end()) { auto& object_statuses = ctx->object_statuses; sai_route_entry_t route_entry; @@ -942,54 +929,120 @@ bool MuxNbrHandler::createBulkRouteEntries(std::list& bulk_ copy(route_entry.destination, ctx->pfx); subnet(route_entry.destination, route_entry.destination); - SWSS_LOG_INFO("Creating route entry %s, nh %" PRIx64 ", add:%d", ctx->pfx.getIp().to_string().c_str(), ctx->nh, ctx->add); + SWSS_LOG_INFO("Adding route entry %s, nh %" PRIx64 " to bulker", ctx->pfx.getIp().to_string().c_str(), ctx->nh); object_statuses.emplace_back(); - if (ctx->add) + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); + + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = ctx->nh; + attrs.push_back(attr); + + status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { - sai_attribute_t attr; - vector attrs; + // entry exists in bulker, erase and continue + ctx = bulk_ctx_list.erase(ctx); + continue; + } - attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - attr.value.s32 = SAI_PACKET_ACTION_FORWARD; - attrs.push_back(attr); + ctx++; + } - attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - attr.value.oid = ctx->nh; - attrs.push_back(attr); + gRouteBulker.flush(); - sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - SWSS_LOG_ERROR("Failed to create add entry for tunnel route in bulker object, entry already exists %s,nh %" PRIx64, - ctx->pfx.getIp().to_string().c_str(), ctx->nh); + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + + if (object_statuses.empty()) + { + continue; + } + + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_INFO("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); continue; } + SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", + ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); } else { - sai_status_t status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - SWSS_LOG_ERROR("Failed to create remove entry for tunnel route in bulker object, entry already exists %s,nh %" PRIx64, - ctx->pfx.getIp().to_string().c_str(), ctx->nh); - continue; - } + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } - count++; + + SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); } - SWSS_LOG_INFO("Successfully 
created %d bulk route entries", count); - return true; + gRouteBulker.clear(); + return ret; } -bool MuxNbrHandler::processBulkRouteEntries(std::list& bulk_ctx_list) +bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) { + sai_status_t status; + bool ret = true; + + auto ctx = bulk_ctx_list.begin(); + while (ctx != bulk_ctx_list.end()) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Removing route entry %s, nh %" PRIx64 "", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); + if (status == SAI_STATUS_SUCCESS) + { + // entry already removed, clear from list and continue + ctx = bulk_ctx_list.erase(ctx); + continue; + } + ctx++; + } + + gRouteBulker.flush(); + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { auto& object_statuses = ctx->object_statuses; + + if (object_statuses.empty()) + { + continue; + } + auto it_status = object_statuses.begin(); - sai_status_t status = *it_status++; + status = *it_status++; sai_route_entry_t route_entry; route_entry.switch_id = gSwitchId; @@ -997,56 +1050,32 @@ bool MuxNbrHandler::processBulkRouteEntries(std::list& bulk copy(route_entry.destination, ctx->pfx); subnet(route_entry.destination, route_entry.destination); - if (ctx->add) + if (status != SAI_STATUS_SUCCESS) { - if (status != SAI_STATUS_SUCCESS) - { - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { - SWSS_LOG_NOTICE("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); - continue; - } - SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", - ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); - return false; - } - - if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) - { - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); - } - else - { - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_INFO("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); + continue; } + SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", + ctx->pfx.getIp().to_string().c_str(), status); + ret = false; + continue; + } - SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); } else { - if (status != SAI_STATUS_SUCCESS) - { - if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_NOTICE("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); - continue; - } - SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", - ctx->pfx.getIp().to_string().c_str(), status); - return false; - } - - if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); - } - else - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); - } - - SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } + + SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); } - return true; + + gRouteBulker.clear(); + return ret; } void 
MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 4f4ab8117c..3a6d165db4 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -41,15 +41,14 @@ struct MuxRouteBulkContext std::deque object_statuses; // Bulk statuses IpPrefix pfx; // Route prefix sai_object_id_t nh; // nexthop id - bool add; // add route bool - MuxRouteBulkContext(IpPrefix pfx, bool add) - : pfx(pfx), add(add) + MuxRouteBulkContext(IpPrefix pfx) + : pfx(pfx) { } - MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh, bool add) - : pfx(pfx), nh(nh), add(add) + MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh) + : pfx(pfx), nh(nh) { } }; @@ -97,8 +96,8 @@ class MuxNbrHandler string getAlias() const { return alias_; }; private: - bool createBulkRouteEntries(std::list& bulk_ctx_list); - bool processBulkRouteEntries(std::list& bulk_ctx_list); + bool removeRoutes(std::list& bulk_ctx_list); + bool addRoutes(std::list& bulk_ctx_list); inline void updateTunnelRoute(NextHopKey, bool = true); diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index e14f8c0294..3b7daa9abf 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -795,6 +795,8 @@ void NeighOrch::doTask(Consumer &consumer) NeighborEntry neighbor_entry = { ip_address, alias }; + NeighborContext ctx = NeighborContext(neighbor_entry); + if (op == SET_COMMAND) { Port p; @@ -820,6 +822,8 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } + ctx.mac = mac_address; + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { @@ -848,7 +852,7 @@ void NeighOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } - else if (addNeighbor(neighbor_entry, mac_address)) + else if (addNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -879,7 +883,7 @@ void NeighOrch::doTask(Consumer &consumer) { if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { - if (removeNeighbor(neighbor_entry)) + if (removeNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -900,13 +904,18 @@ void NeighOrch::doTask(Consumer &consumer) } } -bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress &macAddress) +bool NeighOrch::addNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; IpAddress ip_address = neighborEntry.ip_address; string alias = neighborEntry.alias; + bool bulk_op = ctx.bulk_op; sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -975,7 +984,8 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress SWSS_LOG_NOTICE("Neighbor %s already learned on %s in VRF %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str(), vrf_name.c_str()); } - if (!removeNeighbor(temp_entry)) + NeighborContext removeContext = NeighborContext(temp_entry); + if (!removeNeighbor(removeContext)) { SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); return false; @@ -997,6 +1007,20 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) { + // Using bulker, return and post-process later + if 
(bulk_op) + { + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + object_statuses.emplace_back(); + status = gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + // clear object_statuses so that this neighbor entry is skipped + object_statuses.clear(); + } + return true; + } + status = sai_neighbor_api->create_neighbor_entry(&neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -1062,8 +1086,16 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } else if (isHwConfigured(neighborEntry)) { + ctx.set_neigh_attr_count = (int)neighbor_attrs.size(); for (auto itr : neighbor_attrs) { + if (bulk_op) + { + object_statuses.emplace_back(); + gNeighBulker.set_entry_attribute(&object_statuses.back(), &neighbor_entry, &itr); + return true; + } + status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); if (status != SAI_STATUS_SUCCESS) { @@ -1093,13 +1125,17 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress return true; } -bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) +bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) { SWSS_LOG_ENTER(); sai_status_t status; - IpAddress ip_address = neighborEntry.ip_address; + auto& object_statuses = ctx.object_statuses; + + const NeighborEntry neighborEntry = ctx.neighborEntry; string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + bool bulk_op = ctx.bulk_op; NextHopKey nexthop = { ip_address, alias }; if(m_intfsOrch->isRemoteSystemPortIntf(alias)) @@ -1170,6 +1206,18 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) SWSS_LOG_NOTICE("Removed next hop %s on %s", ip_address.to_string().c_str(), alias.c_str()); + if (bulk_op) + { + object_statuses.emplace_back(); + status = gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); + if (status == SAI_STATUS_SUCCESS) + { + // clear object_statuses so that this neighbor entry is skipped + object_statuses.clear(); + } + return true; + } + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); if (status != SAI_STATUS_SUCCESS) { @@ -1229,114 +1277,7 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) return true; } -/** - * @brief Creates a neighbor add entry and adds it to bulker. - * @param ctx NeighborBulkContext contains neighbor information and list of object statuses. 
- */ -bool NeighOrch::addBulkNeighbor(NeighborBulkContext& ctx) -{ - SWSS_LOG_ENTER(); - - sai_status_t status; - auto& object_statuses = ctx.object_statuses; - - const MacAddress &macAddress = ctx.mac; - const NeighborEntry neighborEntry = ctx.neighborEntry; - string alias = neighborEntry.alias; - IpAddress ip_address = neighborEntry.ip_address; - - SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); - - sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); - if (rif_id == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); - return false; - } - - sai_neighbor_entry_t neighbor_entry; - neighbor_entry.rif_id = rif_id; - neighbor_entry.switch_id = gSwitchId; - copy(neighbor_entry.ip_address, ip_address); - - vector neighbor_attrs; - sai_attribute_t neighbor_attr; - - neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS; - memcpy(neighbor_attr.value.mac, macAddress.getMac(), 6); - neighbor_attrs.push_back(neighbor_attr); - - if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) - { - /* Check if this prefix is a configured ip, if not allow */ - IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); - if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) - { - neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; - neighbor_attr.value.booldata = 1; - neighbor_attrs.push_back(neighbor_attr); - } - } - - PortsOrch* ports_orch = gDirectory.get(); - auto vlan_ports = ports_orch->getAllVlans(); - - for (auto vlan_port: vlan_ports) - { - if (vlan_port == alias) - { - continue; - } - NeighborEntry temp_entry = { ip_address, vlan_port }; - if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) - { - SWSS_LOG_NOTICE("Neighbor %s on %s already exists, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); - if (!removeNeighbor(temp_entry)) - { - SWSS_LOG_ERROR("Failed to create remove neighbor entry %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); - return false; - } - } - } - - if (gMySwitchType == "voq") - { - if (!addVoqEncapIndex(alias, ip_address, neighbor_attrs)) - { - return false; - } - } - - bool hw_config = isHwConfigured(neighborEntry); - MuxOrch* mux_orch = gDirectory.get(); - if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) - { - object_statuses.emplace_back(); - status = gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - SWSS_LOG_NOTICE("Neighbor add entry %s already exists in bulker.", ip_address.to_string().c_str()); - return true; - } - } - else if (hw_config) - { - ctx.set_neigh_attr_count = (int)neighbor_attrs.size(); - for (int i = 0; i < ctx.set_neigh_attr_count; i++) - { - object_statuses.emplace_back(); - gNeighBulker.set_entry_attribute(&object_statuses.back(), &neighbor_entry, neighbor_attrs.data()); - } - } - - return true; -} - -/** - * @brief Checks statuses of bulker add operations. 
- * @param ctx NeighborBulkContext contains NeighborEntry and status list - */ -bool NeighOrch::addBulkNeighborPost(NeighborBulkContext& ctx) +bool NeighOrch::addNeighborPost(NeighborContext& ctx) { SWSS_LOG_ENTER(); @@ -1349,7 +1290,7 @@ bool NeighOrch::addBulkNeighborPost(NeighborBulkContext& ctx) string alias = neighborEntry.alias; IpAddress ip_address = neighborEntry.ip_address; - SWSS_LOG_INFO("Checking neighbor entry %s on %s status.", ip_address.to_string().c_str(), alias.c_str()); + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -1374,7 +1315,6 @@ bool NeighOrch::addBulkNeighborPost(NeighborBulkContext& ctx) { SWSS_LOG_ERROR("Neighbor exists: neighbor %s on %s, skipping: status:%s", macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); - /* Returning True so as to skip retry */ return true; } else @@ -1464,107 +1404,7 @@ bool NeighOrch::addBulkNeighborPost(NeighborBulkContext& ctx) return true; } -/** - * @brief Creates a neighbor remove entry and adds it to bulker. - * @param ctx NeighborBulkContext contains neighbor information and list of object statuses. - */ -bool NeighOrch::removeBulkNeighbor(NeighborBulkContext& ctx) -{ - SWSS_LOG_ENTER(); - - sai_status_t status; - auto& object_statuses = ctx.object_statuses; - - const NeighborEntry neighborEntry = ctx.neighborEntry; - string alias = neighborEntry.alias; - IpAddress ip_address = neighborEntry.ip_address; - - NextHopKey nexthop = { ip_address, alias }; - if(m_intfsOrch->isRemoteSystemPortIntf(alias)) - { - //For remote system ports kernel nexthops are always on inband. Change the key - Port inbp; - gPortsOrch->getInbandPort(inbp); - assert(inbp.m_alias.length()); - - nexthop.alias = inbp.m_alias; - } - - if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) - { - return true; - } - - if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end() && m_syncdNextHops[nexthop].ref_count > 0) - { - SWSS_LOG_INFO("Failed to remove still referenced neighbor %s on %s", - m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); - return false; - } - - if (isHwConfigured(neighborEntry)) - { - sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); - - sai_neighbor_entry_t neighbor_entry; - neighbor_entry.rif_id = rif_id; - neighbor_entry.switch_id = gSwitchId; - copy(neighbor_entry.ip_address, ip_address); - - sai_object_id_t next_hop_id = m_syncdNextHops[nexthop].next_hop_id; - status = sai_next_hop_api->remove_next_hop(next_hop_id); - if (status != SAI_STATUS_SUCCESS) - { - /* When next hop is not found, we continue to remove neighbor entry. 
*/ - if (status == SAI_STATUS_ITEM_NOT_FOUND) - { - SWSS_LOG_NOTICE("Next hop %s on %s doesn't exist, rv:%d", - ip_address.to_string().c_str(), alias.c_str(), status); - } - else - { - SWSS_LOG_ERROR("Failed to remove next hop %s on %s, rv:%d", - ip_address.to_string().c_str(), alias.c_str(), status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEXT_HOP, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - } - - if (status != SAI_STATUS_ITEM_NOT_FOUND) - { - if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEXTHOP); - } - else - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEXTHOP); - } - } - - SWSS_LOG_NOTICE("Removed next hop %s on %s", - ip_address.to_string().c_str(), alias.c_str()); - - object_statuses.emplace_back(); - status = gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - SWSS_LOG_ERROR("Failed to remove neighbor %s: already exists in bulker", ip_address.to_string().c_str()); - return false; - } - } - - return true; -} - -/** - * @brief Checks statuses of bulker remove operations. - * @param ctx NeighborBulkContext contains NeighborEntry and status list - */ -bool NeighOrch::removeBulkNeighborPost(NeighborBulkContext& ctx, bool disable) +bool NeighOrch::removeNeighborPost(NeighborContext& ctx, bool disable) { SWSS_LOG_ENTER(); @@ -1683,7 +1523,11 @@ bool NeighOrch::enableNeighbor(const NeighborEntry& neighborEntry) return true; } - return addNeighbor(neighborEntry, m_syncdNeighbors[neighborEntry].mac); + NeighborEntry neigh = neighborEntry; + NeighborContext ctx = NeighborContext(neigh); + ctx.mac = m_syncdNeighbors[neighborEntry].mac; + + return addNeighbor(ctx); } bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) @@ -1702,96 +1546,121 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return true; } - return removeNeighbor(neighborEntry, true); + NeighborContext ctx = NeighborContext(neighborEntry); + + return removeNeighbor(ctx, true); } -/** - * @brief Enters neighbor entries into neighbor bulker. - * @param bulk_ctx_list List of NeighborBulkContext entries to add to bulker. 
- */ -bool NeighOrch::createBulkNeighborEntries(std::list& bulk_ctx_list) +bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) { - int count = 0; - - SWSS_LOG_INFO("Creating %d bulk neighbor entries", (int)bulk_ctx_list.size()); + bool ret = true; - for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + auto ctx = bulk_ctx_list.begin(); + while (ctx != bulk_ctx_list.end()) { + if (!ctx->bulk_op) + { + SWSS_LOG_ERROR("Calling enableNeighbors with bulking disabled is not supported"); + ctx++; + continue; + } const NeighborEntry& neighborEntry = ctx->neighborEntry; ctx->mac = m_syncdNeighbors[neighborEntry].mac; if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) { SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + ctx++; continue; } - if (ctx->enable && isHwConfigured(neighborEntry)) + if (isHwConfigured(neighborEntry)) { SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); + ctx++; continue; } - if (ctx->enable) - { - SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); + SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); - if(!addBulkNeighbor(*ctx)) - { - SWSS_LOG_INFO("Adding bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); - return false; - } - } - else + if(!addNeighbor(*ctx)) { - SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + SWSS_LOG_INFO("Adding bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* remove from list, and continue */ + ctx = bulk_ctx_list.erase(ctx); + continue; + } - if(!removeBulkNeighbor(*ctx)) - { - SWSS_LOG_INFO("Removing bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); - return false; - } + ctx++; + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!addNeighborPost(*ctx)) + { + SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries */ + ret = false; } - count++; } - SWSS_LOG_INFO("Successfully created %d bulk neighbor entries", count); - return true; + + gNeighBulker.clear(); + return ret; } -/** - * @brief Processes neighbor entries in bulker. - * @param bulk_ctx_list List of neighbor context entries to be processed. 
- */ -bool NeighOrch::flushBulkNeighborEntries(std::list& bulk_ctx_list) +bool NeighOrch::disableNeighbors(std::list& bulk_ctx_list) { - SWSS_LOG_INFO("Processing %d bulk add neighbor entries", (int)bulk_ctx_list.size()); - gNeighBulker.flush(); + bool ret = true; - for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + auto ctx = bulk_ctx_list.begin(); + while (ctx != bulk_ctx_list.end()) { + if (!ctx->bulk_op) + { + SWSS_LOG_ERROR("Calling disableNeighbors with bulking disabled is not supported"); + ctx++; + continue; + } const NeighborEntry& neighborEntry = ctx->neighborEntry; - if (ctx->enable) + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) { - if (!addBulkNeighborPost(*ctx)) - { - SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); - gNeighBulker.clear(); - return false; - } + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + ctx++; + continue; } - else + + SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!removeNeighbor(*ctx)) { - if (!removeBulkNeighborPost(*ctx, true)) - { - gNeighBulker.clear(); - return false; - } + SWSS_LOG_INFO("Removing bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* remove from list, and continue */ + ctx = bulk_ctx_list.erase(ctx); + continue; + } + ctx++; + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!removeNeighborPost(*ctx, true)) + { + SWSS_LOG_INFO("Disable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries */ + ret = false; } } - SWSS_LOG_INFO("Succeeded in processing %d bulk add neighbor entries", (int)bulk_ctx_list.size()); gNeighBulker.clear(); - return true; + return ret; } sai_object_id_t NeighOrch::addTunnelNextHop(const NextHopKey& nh) @@ -1973,7 +1842,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_NOTICE("VOQ encap index set failed for neighbor %s. Removing and re-adding", kfvKey(t).c_str()); //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -2004,7 +1874,9 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) } //Add neigh to SAI - if (addNeighbor(neighbor_entry, mac_address)) + NeighborContext ctx = NeighborContext(neighbor_entry); + ctx.mac = mac_address; + if (addNeighbor(ctx)) { //neigh successfully added to SAI. Set STATE DB to signal kernel programming by neighbor manager @@ -2057,7 +1929,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. 
Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index b9733fe611..179354fcb7 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -44,16 +44,25 @@ struct NeighborUpdate bool add; }; -struct NeighborBulkContext +/* + * Keeps track of neighbor entry information primarily for bulk operations + */ +struct NeighborContext { - std::deque object_statuses; // Bulk statuses - NeighborEntry neighborEntry; // Neighbor entry to process + NeighborEntry neighborEntry; // neighbor entry to process + std::deque object_statuses; // bulk statuses MacAddress mac; // neighbor mac - bool enable; // enable/disable - int set_neigh_attr_count = 0; // Keeps track of number of attr set - NeighborBulkContext(NeighborEntry neighborEntry, bool enable) - : neighborEntry(neighborEntry), enable(enable) + bool bulk_op = false; // use bulker (only for mux use for now) + int set_neigh_attr_count = 0; // number of attr set + + NeighborContext(NeighborEntry neighborEntry) + : neighborEntry(neighborEntry) + { + } + + NeighborContext(NeighborEntry neighborEntry, bool bulk_op) + : neighborEntry(neighborEntry), bulk_op(bulk_op) { } }; @@ -81,8 +90,8 @@ class NeighOrch : public Orch, public Subject, public Observer bool enableNeighbor(const NeighborEntry&); bool disableNeighbor(const NeighborEntry&); - bool createBulkNeighborEntries(std::list&); - bool flushBulkNeighborEntries(std::list&); + bool enableNeighbors(std::list&); + bool disableNeighbors(std::list&); bool isHwConfigured(const NeighborEntry&); sai_object_id_t addTunnelNextHop(const NextHopKey&); @@ -116,12 +125,10 @@ class NeighOrch : public Orch, public Subject, public Observer bool removeNextHop(const IpAddress&, const string&); - bool addNeighbor(const NeighborEntry&, const MacAddress&); - bool removeNeighbor(const NeighborEntry&, bool disable = false); - bool addBulkNeighbor(NeighborBulkContext& ctx); - bool addBulkNeighborPost(NeighborBulkContext& ctx); - bool removeBulkNeighbor(NeighborBulkContext& ctx); - bool removeBulkNeighborPost(NeighborBulkContext& ctx, bool disable = false); + bool addNeighbor(NeighborContext& ctx); + bool removeNeighbor(NeighborContext& ctx, bool disable = false); + bool addNeighborPost(NeighborContext& ctx); + bool removeNeighborPost(NeighborContext& ctx, bool disable = false); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/orchagent/p4orch/tests/mock_bulker.h b/orchagent/p4orch/tests/mock_bulker.h new file mode 100644 index 0000000000..087bb70394 --- /dev/null +++ b/orchagent/p4orch/tests/mock_bulker.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#define MOCK_MAX_BULK_SIZE 1000 + +extern "C" +{ +#include "sai.h" +} + +class MockNeighborBulker +{ + public: + MOCK_METHOD4(create_entry, sai_status_t(_Out_ sai_status_t *object_status, _In_ const sai_neighbor_entry_t *entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list)); + + MOCK_METHOD2(remove_entry, sai_status_t(_Out_ sai_status_t *object_status, _In_ const sai_neighbor_entry_t *entry)); + + MOCK_METHOD0(flush, void()); + + MOCK_METHOD0(clear, void()); +}; + +MockNeighborBulker *mock_neighbor_bulker; + +sai_status_t mock_create_entry( + _Out_ sai_status_t *object_status, + _In_ const sai_neighbor_entry_t *entry, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) +{ + return mock_neighbor_bulker->create_entry(object_status, entry, attr_count, attr_list); +} + 
+sai_status_t mock_remove_entry( + _Out_ sai_status_t *object_status, + _In_ const sai_neighbor_entry_t *entry) +{ + return mock_neighbor_bulker->remove_entry(object_status, entry); +} + +void mock_flush() +{ + return mock_neighbor_bulker->flush(); +} + +void mock_clear() +{ + return mock_neighbor_bulker->clear(); +} \ No newline at end of file From 31e0dd9c56e22a0ae1bb9a3c486314c27f87467d Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Mon, 3 Jun 2024 20:51:46 +0000 Subject: [PATCH 7/9] Addressing PR comments Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index aa4aea2f87..c045729ff4 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -808,27 +808,14 @@ bool MuxNbrHandler::enable(bool update_rt) route_ctx_list.push_back(MuxRouteBulkContext(pfx)); } + updateTunnelRoute(nh_key, false); + it++; } - if (update_rt) + if (update_rt && !removeRoutes(route_ctx_list)) { - if (!removeRoutes(route_ctx_list)) - { - return false; - } - - it = neighbors_.begin(); - while (it != neighbors_.end()) - { - NextHopKey nh_key = NextHopKey(it->first, alias_); - if (update_rt) - { - updateTunnelRoute(nh_key, false); - } - - it++; - } + return false; } gRouteBulker.clear(); From c823cb2ae31280b949f082535737337b47674c9b Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Tue, 4 Jun 2024 08:04:09 +0000 Subject: [PATCH 8/9] Removing un-needed code and expanding test coverage Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 33 +----- orchagent/neighorch.cpp | 153 +++++++-------------------- orchagent/neighorch.h | 4 +- orchagent/p4orch/tests/mock_bulker.h | 49 --------- tests/mock_tests/mux_rollback_ut.cpp | 22 +++- 5 files changed, 63 insertions(+), 198 deletions(-) delete mode 100644 orchagent/p4orch/tests/mock_bulker.h diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index c045729ff4..bcee8569c1 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -906,8 +906,7 @@ bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) sai_status_t status; bool ret = true; - auto ctx = bulk_ctx_list.begin(); - while (ctx != bulk_ctx_list.end()) + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { auto& object_statuses = ctx->object_statuses; sai_route_entry_t route_entry; @@ -931,14 +930,6 @@ bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) attrs.push_back(attr); status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - // entry exists in bulker, erase and continue - ctx = bulk_ctx_list.erase(ctx); - continue; - } - - ctx++; } gRouteBulker.flush(); @@ -946,12 +937,6 @@ bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { auto& object_statuses = ctx->object_statuses; - - if (object_statuses.empty()) - { - continue; - } - auto it_status = object_statuses.begin(); status = *it_status++; @@ -994,8 +979,7 @@ bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) sai_status_t status; bool ret = true; - auto ctx = bulk_ctx_list.begin(); - while (ctx != bulk_ctx_list.end()) + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { auto& object_statuses = ctx->object_statuses; sai_route_entry_t route_entry; @@ -1008,13 +992,6 @@ bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) 
object_statuses.emplace_back(); status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); - if (status == SAI_STATUS_SUCCESS) - { - // entry already removed, clear from list and continue - ctx = bulk_ctx_list.erase(ctx); - continue; - } - ctx++; } gRouteBulker.flush(); @@ -1022,12 +999,6 @@ bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { auto& object_statuses = ctx->object_statuses; - - if (object_statuses.empty()) - { - continue; - } - auto it_status = object_statuses.begin(); status = *it_status++; diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 3b7daa9abf..ce817b026f 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -1012,12 +1012,7 @@ bool NeighOrch::addNeighbor(NeighborContext& ctx) { SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); object_statuses.emplace_back(); - status = gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); - if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) - { - // clear object_statuses so that this neighbor entry is skipped - object_statuses.clear(); - } + gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); return true; } @@ -1089,13 +1084,6 @@ bool NeighOrch::addNeighbor(NeighborContext& ctx) ctx.set_neigh_attr_count = (int)neighbor_attrs.size(); for (auto itr : neighbor_attrs) { - if (bulk_op) - { - object_statuses.emplace_back(); - gNeighBulker.set_entry_attribute(&object_statuses.back(), &neighbor_entry, &itr); - return true; - } - status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); if (status != SAI_STATUS_SUCCESS) { @@ -1209,12 +1197,7 @@ bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) if (bulk_op) { object_statuses.emplace_back(); - status = gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); - if (status == SAI_STATUS_SUCCESS) - { - // clear object_statuses so that this neighbor entry is skipped - object_statuses.clear(); - } + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); return true; } @@ -1277,7 +1260,8 @@ bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) return true; } -bool NeighOrch::addNeighborPost(NeighborContext& ctx) +/* Process bulk ctx entry and enable the neigbor */ +bool NeighOrch::processBulkEnableNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); @@ -1290,7 +1274,13 @@ bool NeighOrch::addNeighborPost(NeighborContext& ctx) string alias = neighborEntry.alias; IpAddress ip_address = neighborEntry.ip_address; - SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + if (!ctx.bulk_op) + { + SWSS_LOG_INFO("Not a bulk entry for %s on %s", ip_address.to_string().c_str(), alias.c_str()); + return true; + } + + SWSS_LOG_INFO("Checking neighbor create entry status %s on %s.", ip_address.to_string().c_str(), alias.c_str()); sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -1304,16 +1294,15 @@ bool NeighOrch::addNeighborPost(NeighborContext& ctx) neighbor_entry.switch_id = gSwitchId; copy(neighbor_entry.ip_address, ip_address); - bool hw_config = isHwConfigured(neighborEntry); MuxOrch* mux_orch = gDirectory.get(); - if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) + if 
(mux_orch->isNeighborActive(ip_address, macAddress, alias)) { status = *it_status++; if (status != SAI_STATUS_SUCCESS) { if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { - SWSS_LOG_ERROR("Neighbor exists: neighbor %s on %s, skipping: status:%s", + SWSS_LOG_INFO("Neighbor exists: neighbor %s on %s, skipping: status:%s", macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); return true; } @@ -1369,42 +1358,18 @@ bool NeighOrch::addNeighborPost(NeighborContext& ctx) return false; } - hw_config = true; - } - else if (hw_config) - { - for (int i = 0; i < ctx.set_neigh_attr_count; i++) - { - status = *it_status++; - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Bulker failed to update neighbor %s on %s, rv:%d", - macAddress.to_string().c_str(), alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - } - SWSS_LOG_NOTICE("Bulker updated neighbor %s on %s", macAddress.to_string().c_str(), alias.c_str()); } - m_syncdNeighbors[neighborEntry] = { macAddress, hw_config }; + m_syncdNeighbors[neighborEntry] = { macAddress, true }; NeighborUpdate update = { neighborEntry, macAddress, true }; notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); - if(gMySwitchType == "voq") - { - //Sync the neighbor to add to the CHASSIS_APP_DB - voqSyncAddNeigh(alias, ip_address, macAddress, neighbor_entry); - } - return true; } -bool NeighOrch::removeNeighborPost(NeighborContext& ctx, bool disable) +/* Process bulk ctx entry and disable the neigbor */ +bool NeighOrch::processBulkDisableNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); @@ -1421,13 +1386,7 @@ bool NeighOrch::removeNeighborPost(NeighborContext& ctx, bool disable) return true; } - SWSS_LOG_NOTICE("Removing neighbor %s on %s", ip_address.to_string().c_str(), - m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); - - if (object_statuses.empty()) - { - return true; - } + SWSS_LOG_INFO("Checking neighbor remove entry status %s on %s.", ip_address.to_string().c_str(), m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); if (isHwConfigured(neighborEntry)) { @@ -1443,12 +1402,12 @@ bool NeighOrch::removeNeighborPost(NeighborContext& ctx, bool disable) { if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_NOTICE("Bulker skipped, neighbor %s on %s already removed, rv:%d", + SWSS_LOG_NOTICE("Bulk remove entry skipped, neighbor %s on %s already removed, rv:%d", m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); } else { - SWSS_LOG_ERROR("Bulker failed to remove neighbor %s on %s, rv:%d", + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); if (handle_status != task_success) @@ -1475,25 +1434,8 @@ bool NeighOrch::removeNeighborPost(NeighborContext& ctx, bool disable) } } - - /* Do not delete entry from cache if its disable request */ - if (disable) - { - m_syncdNeighbors[neighborEntry].hw_configured = false; - return true; - } - - m_syncdNeighbors.erase(neighborEntry); - - NeighborUpdate update = { neighborEntry, MacAddress(), false }; - notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); - - if(gMySwitchType == "voq") - { - //Sync the neighbor to delete from the CHASSIS_APP_DB - voqSyncDelNeigh(alias, ip_address); - } - + /* Do not delete entry from 
cache for disable request */ + m_syncdNeighbors[neighborEntry].hw_configured = false; return true; } @@ -1551,33 +1493,25 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return removeNeighbor(ctx, true); } +/* enable neighbors using bulker */ bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) { bool ret = true; - auto ctx = bulk_ctx_list.begin(); - while (ctx != bulk_ctx_list.end()) + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { - if (!ctx->bulk_op) - { - SWSS_LOG_ERROR("Calling enableNeighbors with bulking disabled is not supported"); - ctx++; - continue; - } const NeighborEntry& neighborEntry = ctx->neighborEntry; ctx->mac = m_syncdNeighbors[neighborEntry].mac; if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) { SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); - ctx++; continue; } if (isHwConfigured(neighborEntry)) { SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); - ctx++; continue; } @@ -1585,21 +1519,22 @@ bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) if(!addNeighbor(*ctx)) { - SWSS_LOG_INFO("Adding bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); - /* remove from list, and continue */ - ctx = bulk_ctx_list.erase(ctx); + SWSS_LOG_ERROR("Neighbor %s create entry failed.", neighborEntry.ip_address.to_string().c_str()); continue; } - - ctx++; } gNeighBulker.flush(); for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { + if (ctx->object_statuses.empty()) + { + continue; + } + const NeighborEntry& neighborEntry = ctx->neighborEntry; - if (!addNeighborPost(*ctx)) + if (!processBulkEnableNeighbor(*ctx)) { SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); /* finish processing bulk entries */ @@ -1611,50 +1546,44 @@ bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) return ret; } +/* disable neighbors using bulker */ bool NeighOrch::disableNeighbors(std::list& bulk_ctx_list) { bool ret = true; - auto ctx = bulk_ctx_list.begin(); - while (ctx != bulk_ctx_list.end()) + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { - if (!ctx->bulk_op) - { - SWSS_LOG_ERROR("Calling disableNeighbors with bulking disabled is not supported"); - ctx++; - continue; - } const NeighborEntry& neighborEntry = ctx->neighborEntry; ctx->mac = m_syncdNeighbors[neighborEntry].mac; if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) { SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); - ctx++; continue; } SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); - if(!removeNeighbor(*ctx)) + if(!removeNeighbor(*ctx, true)) { - SWSS_LOG_INFO("Removing bulk neighbor entry failed for %s", neighborEntry.ip_address.to_string().c_str()); - /* remove from list, and continue */ - ctx = bulk_ctx_list.erase(ctx); - continue; + SWSS_LOG_ERROR("Neighbor %s remove entry failed.", neighborEntry.ip_address.to_string().c_str()); } - ctx++; } gNeighBulker.flush(); for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) { + if (ctx->object_statuses.empty()) + { + continue; + } + const NeighborEntry& neighborEntry = ctx->neighborEntry; - if (!removeNeighborPost(*ctx, true)) + if (!processBulkDisableNeighbor(*ctx)) { SWSS_LOG_INFO("Disable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); - /* finish 
processing bulk entries */ + /* finish processing bulk entries but return false */ ret = false; } } diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index 179354fcb7..e6e1f380c4 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -127,8 +127,8 @@ class NeighOrch : public Orch, public Subject, public Observer bool addNeighbor(NeighborContext& ctx); bool removeNeighbor(NeighborContext& ctx, bool disable = false); - bool addNeighborPost(NeighborContext& ctx); - bool removeNeighborPost(NeighborContext& ctx, bool disable = false); + bool processBulkEnableNeighbor(NeighborContext& ctx); + bool processBulkDisableNeighbor(NeighborContext& ctx); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/orchagent/p4orch/tests/mock_bulker.h b/orchagent/p4orch/tests/mock_bulker.h deleted file mode 100644 index 087bb70394..0000000000 --- a/orchagent/p4orch/tests/mock_bulker.h +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once - -#include -#define MOCK_MAX_BULK_SIZE 1000 - -extern "C" -{ -#include "sai.h" -} - -class MockNeighborBulker -{ - public: - MOCK_METHOD4(create_entry, sai_status_t(_Out_ sai_status_t *object_status, _In_ const sai_neighbor_entry_t *entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list)); - - MOCK_METHOD2(remove_entry, sai_status_t(_Out_ sai_status_t *object_status, _In_ const sai_neighbor_entry_t *entry)); - - MOCK_METHOD0(flush, void()); - - MOCK_METHOD0(clear, void()); -}; - -MockNeighborBulker *mock_neighbor_bulker; - -sai_status_t mock_create_entry( - _Out_ sai_status_t *object_status, - _In_ const sai_neighbor_entry_t *entry, - _In_ uint32_t attr_count, - _In_ const sai_attribute_t *attr_list) -{ - return mock_neighbor_bulker->create_entry(object_status, entry, attr_count, attr_list); -} - -sai_status_t mock_remove_entry( - _Out_ sai_status_t *object_status, - _In_ const sai_neighbor_entry_t *entry) -{ - return mock_neighbor_bulker->remove_entry(object_status, entry); -} - -void mock_flush() -{ - return mock_neighbor_bulker->flush(); -} - -void mock_clear() -{ - return mock_neighbor_bulker->clear(); -} \ No newline at end of file diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index 504bbbf41a..52aa29d24e 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -28,6 +28,8 @@ namespace mux_rollback_test using namespace mock_orch_test; using ::testing::Return; using ::testing::Throw; + using ::testing::DoAll; + using ::testing::SetArrayArgument; static const string TEST_INTERFACE = "Ethernet4"; @@ -163,31 +165,35 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) { + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) { SetAndAssertMuxState(ACTIVE_STATE); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) { + 
std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; EXPECT_CALL(*mock_sai_route_api, remove_route_entries) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) { SetAndAssertMuxState(ACTIVE_STATE); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; EXPECT_CALL(*mock_sai_route_api, create_route_entries) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(STANDBY_STATE); } @@ -271,4 +277,12 @@ namespace mux_rollback_test SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopTableFullRollbackToActive) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } } From 1cfcb4a98dbdd96a1fd40fe084ff4dbffe327597 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic Date: Tue, 4 Jun 2024 17:35:34 +0000 Subject: [PATCH 9/9] addressing PR comments Signed-off-by: Nikola Dancejic --- orchagent/muxorch.cpp | 4 +--- orchagent/neighorch.cpp | 1 - orchagent/neighorch.h | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index bcee8569c1..ce6bf2baf2 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -806,10 +806,9 @@ bool MuxNbrHandler::enable(bool update_rt) if (update_rt) { route_ctx_list.push_back(MuxRouteBulkContext(pfx)); + updateTunnelRoute(nh_key, false); } - updateTunnelRoute(nh_key, false); - it++; } @@ -818,7 +817,6 @@ bool MuxNbrHandler::enable(bool update_rt) return false; } - gRouteBulker.clear(); return true; } diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index ce817b026f..df96405791 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -1081,7 +1081,6 @@ bool NeighOrch::addNeighbor(NeighborContext& ctx) } else if (isHwConfigured(neighborEntry)) { - ctx.set_neigh_attr_count = (int)neighbor_attrs.size(); for (auto itr : neighbor_attrs) { status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index e6e1f380c4..0b59181db1 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -52,9 +52,7 @@ struct NeighborContext NeighborEntry neighborEntry; // neighbor entry to process std::deque object_statuses; // bulk statuses MacAddress mac; // neighbor mac - bool bulk_op = false; // use bulker (only for mux use for now) - int set_neigh_attr_count = 0; // number of attr set NeighborContext(NeighborEntry neighborEntry) : neighborEntry(neighborEntry)