From 03a2683f272d28876596d62a309f9e1cc2ae26fa Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 21 Mar 2022 04:55:19 -0700 Subject: [PATCH 01/10] Support tunnel traffic remap Signed-off-by: bingwang --- orchagent/muxorch.cpp | 32 ++++- orchagent/qosorch.cpp | 8 ++ orchagent/tunneldecaporch.cpp | 227 +++++++++++++++++++++++++++------- orchagent/tunneldecaporch.h | 38 +++++- 4 files changed, 251 insertions(+), 54 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 5b7b0570a5..91c2e80821 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -42,7 +42,6 @@ extern sai_next_hop_api_t* sai_next_hop_api; extern sai_router_interface_api_t* sai_router_intfs_api; /* Constants */ -#define MUX_TUNNEL "MuxTunnel0" #define MUX_ACL_TABLE_NAME INGRESS_TABLE_DROP #define MUX_ACL_RULE_NAME "mux_acl_rule" #define MUX_HW_STATE_UNKNOWN "unknown" @@ -162,7 +161,11 @@ static sai_status_t remove_route(IpPrefix &pfx) return status; } -static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* p_src_ip) +static sai_object_id_t create_tunnel( + const IpAddress* p_dst_ip, + const IpAddress* p_src_ip, + sai_object_id_t tc_to_dscp_map_id, + sai_object_id_t tc_to_queue_map_id) { sai_status_t status; @@ -206,6 +209,12 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); + // Set DSCP mode to PIPE to ensure that outer DSCP is independent of inner DSCP + // and inner DSCP is unchanged at decap + attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; + attr.value.s32 = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + tunnel_attrs.push_back(attr); + attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_DROP; tunnel_attrs.push_back(attr); @@ -224,6 +233,20 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* tunnel_attrs.push_back(attr); } + // DSCP rewriting + if (tc_to_dscp_map_id != 
SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP; + attr.value.oid = tc_to_dscp_map_id; + } + + // TC remapping + if (tc_to_queue_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP; + attr.value.oid = tc_to_queue_map_id; + } + sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -1229,10 +1252,11 @@ bool MuxOrch::handlePeerSwitch(const Request& request) MUX_TUNNEL, peer_ip.to_string().c_str()); return false; } - + sai_object_id_t tc_to_dscp_map_id = decap_orch_->getTCToDSCPMap(MUX_TUNNEL); + sai_object_id_t tc_to_queue_map_id = decap_orch_->getTCToQueueMap(MUX_TUNNEL); auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip); + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id); SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index d1a24cb5c9..b46a234056 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -103,6 +103,14 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) SWSS_LOG_ENTER(); sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; + /* + ToDo: + As we are going to have more than one QosMap in type DSCP_TO_TC_MAP, TC_TO_PRIORITY_GROUP_MAP and TC_TO_QUEUE_MAP, + we need some mechanism to which one is switch level, and which one is for tunnel use only. + Two options now: + 1. Hardcode the switch level map name + 2. 
Always use the first map as switch level QoS map + */ auto it = consumer.m_toSync.begin(); KeyOpFieldsValuesTuple tuple = it->second; string qos_object_name = kfvKey(tuple); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 6ef2c96f74..a70b948999 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -40,19 +40,36 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string key = kfvKey(t); string op = kfvOp(t); - IpAddresses ip_addresses; - IpAddress src_ip; + IpAddresses dst_ip_addresses; + IpAddress src_ip("0.0.0.0"); IpAddress* p_src_ip = nullptr; string tunnel_type; string dscp_mode; string ecn_mode; string encap_ecn_mode; string ttl_mode; + sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + + bool valid = true; + sai_object_id_t tunnel_id = SAI_NULL_OBJECT_ID; + // checking to see if the tunnel already exists bool exists = (tunnelTable.find(key) != tunnelTable.end()); - + if (exists) + { + tunnel_id = tunnelTable[key].tunnel_id; + } + //Tunnel term type is set to P2P for mux tunnel to apply different configs + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; + if (key == MUX_TUNNEL) + { + term_type = TUNNEL_TERM_TYPE_P2P; + } if (op == SET_COMMAND) { @@ -72,7 +89,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { try { - ip_addresses = IpAddresses(fvValue(i)); + dst_ip_addresses = IpAddresses(fvValue(i)); } catch (const std::invalid_argument &e) { @@ -80,10 +97,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) valid = false; break; } - if (exists) - { - setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id); - } } else if (fvField(i) == "src_ip") { @@ -114,7 +127,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), dscp_mode, 
tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), dscp_mode, tunnel_id); } } else if (fvField(i) == "ecn_mode") @@ -128,7 +141,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), ecn_mode, tunnel_id); } } else if (fvField(i) == "encap_ecn_mode") @@ -142,7 +155,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), encap_ecn_mode, tunnel_id); } } else if (fvField(i) == "ttl_mode") @@ -156,15 +169,47 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ttl_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), ttl_mode, tunnel_id); + } + } + else if (fvField(i) == DECAP_DSCP_TO_DC_MAP) + { + string dscp_to_dc_map_name = fvValue(i); + // TODO: Validate DSCP_TO_TC_MAP map name, and get map id + if (exists) + { + setTunnelAttribute(fvField(i), dscp_to_dc_map, tunnel_id); } } + else if (fvField(i) == DECAP_TC_TO_PG_MAP) + { + string tc_to_pg_map_name = fvValue(i); + // TODO: Validate TC_TO_PG_MAP name, and get map id + if (exists) + { + setTunnelAttribute(fvField(i), tc_to_pg_map, tunnel_id); + } + } + else if (fvField(i) == ENCAP_TC_TO_DSCP_MAP) + { + // TODO: + } + else if (fvField(i) == ENCAP_TC_TO_QUEUE_MAP) + { + // TODO: + } } - // create new tunnel if it doesn't exists already - if (valid && !exists) + if (valid) { - if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode)) + if (exists) + { + // Update existing tunnel terms + setIpAttribute(key, src_ip, dst_ip_addresses, tunnel_id, term_type); + } + // create new tunnel if it doesn't exists already + else if (addDecapTunnel(key, tunnel_type, dst_ip_addresses, p_src_ip, dscp_mode, ecn_mode, 
encap_ecn_mode, ttl_mode, + dscp_to_dc_map_id, tc_to_pg_map_id, tc_to_dscp_map_id, tc_to_queue_map_id)) { SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } @@ -202,17 +247,35 @@ void TunnelDecapOrch::doTask(Consumer& consumer) * @param[in] dscp - dscp mode (uniform/pipe) * @param[in] ecn - ecn mode (copy_from_outer/standard) * @param[in] ttl - ttl mode (uniform/pipe) + * @param[in] term_type - The type of tunnel term + * @param[in] dscp_to_tc_map_id - Map ID for remapping DSCP to TC (decap) + * @param[in] tc_to_pg_map_id - Map ID for remapping TC to PG (decap) + * @param[in] tc_to_dscp_map_id - Map ID for remapping TC to DSCP (encap) + * @param[in] tc_to_queue_map_id - Map ID for remapping TC to queue (encap) * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip, IpAddress* p_src_ip, string dscp, string ecn, string encap_ecn, string ttl) +bool TunnelDecapOrch::addDecapTunnel( + string key, + string type, + IpAddresses dst_ip, + IpAddress* p_src_ip, + string dscp, + string ecn, + string encap_ecn, + string ttl, + TunnelTermType term_type, + sai_object_id_t dscp_to_tc_map_id, + sai_object_id_t tc_to_pg_map_id, + sai_object_id_t tc_to_dscp_map_id, + sai_object_id_t tc_to_queue_map_id) { SWSS_LOG_ENTER(); sai_status_t status; - + IpAddress src_ip("0.0.0.0"); // adding tunnel attributes to array and writing to ASIC_DB sai_attribute_t attr; vector tunnel_attrs; @@ -264,6 +327,7 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; copy(attr.value.ipaddr, p_src_ip->to_string()); tunnel_attrs.push_back(attr); + src_ip = *p_src_ip; } // decap ecn mode (copy from outer/standard) @@ -312,6 +376,22 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } tunnel_attrs.push_back(attr); + // DSCP_TO_TC_MAP + if (dscp_to_tc_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = 
SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + attr.value.oid = dscp_to_tc_map_id; + tunnel_attrs.push_back(attr); + } + + //TC_TO_PG_MAP + if (tc_to_pg_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + attr.value.oid = tc_to_pg_map_id; + tunnel_attrs.push_back(attr); + } + // write attributes to ASIC_DB sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); @@ -325,10 +405,10 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {} }; + tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, tc_to_dscp_map_id, tc_to_queue_map_id }; - // create a decap tunnel entry for every ip - if (!addDecapTunnelTermEntries(key, dst_ip, tunnel_id)) + // create a decap tunnel entry for every source_ip - dest_ip pair + if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) { return false; } @@ -342,13 +422,15 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip * * Arguments: * @param[in] tunnelKey - key of the tunnel from APP_DB + * @param[in] src_ip - source ip address of decap tunnel * @param[in] dst_ip - destination ip addresses to decap * @param[in] tunnel_id - the id of the tunnel + * @param[in] term_type - P2P or P2MP. 
Other types (MP2P and MP2MP) not supported yet * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses dst_ip, sai_object_id_t tunnel_id) +bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) { SWSS_LOG_ENTER(); @@ -361,7 +443,14 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; - attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P; + } + else + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + } tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; @@ -372,6 +461,14 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds attr.value.oid = tunnel_id; tunnel_table_entry_attrs.push_back(attr); + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + // Set src ip for P2P only + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP; + copy(attr.value.ipaddr, src_ip); + tunnel_table_entry_attrs.push_back(attr); + } + TunnelEntry *tunnel_info = &tunnelTable.find(tunnelKey)->second; // loop through the IP list and create a new tunnel table entry for every IP (in network byte order) @@ -379,12 +476,14 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds for (auto it = tunnel_ips.begin(); it != tunnel_ips.end(); ++it) { const IpAddress& ia = *it; - string ip = ia.to_string(); + string dst_ip = ia.to_string(); + // Key is src_ip-dst_ip, 10.1.1.1-20.2.2.2 + string key = src_ip.to_string + '-' + dst_ip; - // check if the there's an entry already for the ip - if (existingIps.find(ip) != existingIps.end()) + // check if the there's an 
entry already for the key pair + if (existingIps.find(key) != existingIps.end()) { - SWSS_LOG_ERROR("%s already exists. Did not create entry.", ip.c_str()); + SWSS_LOG_ERROR("%s already exists. Did not create entry.", key.c_str()); } else { @@ -397,7 +496,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry(&tunnel_term_table_entry_id, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", ip.c_str()); + SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", key.c_str()); task_process_status handle_status = handleSaiCreateStatus(SAI_API_TUNNEL, status); if (handle_status != task_success) { @@ -406,15 +505,15 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds } // insert into ip to entry mapping - existingIps.insert(ip); + existingIps.insert(key); // insert entry id and ip into tunnel mapping - tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip, dst_ip, tunnel_type }); // pop the last element for the next loop tunnel_table_entry_attrs.pop_back(); - SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", ip.c_str()); + SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", dst_ip.c_str()); } } @@ -490,6 +589,21 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ } } + if (field == DECAP_DSCP_TO_DC_MAP) + { + // TC remapping. 
+ attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + // TODO: Fill value of DSCP_TO_TC_MAP + + } + + if (field == DECAP_TC_TO_PG_MAP) + { + // TC to PG remapping + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + // TODO: Fill value of TC_TO_PRIORITY_GROUP_MAP + } + sai_status_t status = sai_tunnel_api->set_tunnel_attribute(existing_tunnel_id, &attr); if (status != SAI_STATUS_SUCCESS) { @@ -510,13 +624,15 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ * * Arguments: * @param[in] key - key of the tunnel from APP_DB - * @param[in] new_ip_addresses - new destination ip addresses to decap (comes from APP_DB) + * @param[in] src_ip - source IP for tunnel term + * @param[in] new_dst_ip_addresses - new destination ip addresses to decap (comes from APP_DB) * @param[in] tunnel_id - the id of the tunnel + * @param[in] term_type - The type of tunnel term * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, sai_object_id_t tunnel_id) +bool TunnelDecapOrch::setIpAttribute(string key, IpAddress src_ip, IpAddresses new_dst_ip_addresses, sai_object_id_t tunnel_id, TunnelTermType term_type) { TunnelEntry *tunnel_info = &tunnelTable.find(key)->second; @@ -524,16 +640,16 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s vector tunnel_term_info_copy(tunnel_info->tunnel_term_info); tunnel_info->tunnel_term_info.clear(); - tunnel_info->dst_ip_addrs = new_ip_addresses; - + tunnel_info->dst_ip_addrs = new_dst_ip_addresses; + string src_ip = src_ip.to_string(); // loop through original ips and remove ips not in the new ip_addresses for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string ip = tunnel_entry_info.ip_address; - if (!new_ip_addresses.contains(ip)) + string key = src_ip + '-' + tunnel_entry_info.dst_ip; + if 
(!new_dst_ip_addresses.contains(key)) { - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) + if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, key)) { return false; } @@ -541,12 +657,12 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s else { // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, src_ip, dst_ip, term_type }); } } // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, new_ip_addresses, tunnel_id)) + if(!addDecapTunnelTermEntries(key, src_ip, new_dst_ip_addresses, tunnel_id, term_type)) { return false; } @@ -573,7 +689,8 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, tunnel_entry_info.ip_address)) + string term_key = tunnel_entry_info.src_ip + '-' + tunnel_entry_info.dst_ip; + if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key)) { return false; } @@ -618,7 +735,7 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string ip) +bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string key) { sai_status_t status; @@ -634,8 +751,8 @@ bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, } // making sure to remove all instances of the ip address - existingIps.erase(ip); - SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", ip.c_str()); + existingIps.erase(key); + SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", key.c_str()); 
return true; } @@ -793,7 +910,7 @@ bool TunnelDecapOrch::removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAd return true; } -IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) +IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) const { if (tunnelTable.find(tunnelKey) == tunnelTable.end()) { @@ -803,3 +920,25 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) return tunnelTable[tunnelKey].dst_ip_addrs; } + +sai_object_id_t TunnelDecapOrch::getTCToDSCPMap(std::string tunnelKey) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_ERROR("Tunnel config not found %s", tunnelKey.c_str()); + return SAI_NULL_OBJECT_ID; + } + return iter->second.tc_to_dscp_map_id; +} + +sai_object_id_t TunnelDecapOrch::getTCToQueueMap(std::string tunnelKey) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_ERROR("Tunnel config not found %s", tunnelKey.c_str()); + return SAI_NULL_OBJECT_ID; + } + return iter->second.tc_to_queue_map_id; +} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index f7b5f923d9..6e4987d8c6 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -9,10 +9,28 @@ #include "ipaddress.h" #include "ipaddresses.h" + +enum TunnelTermType +{ + TUNNEL_TERM_TYPE_P2P, + TUNNEL_TERM_TYPE_P2MP +}; + +/* Constants */ +#define MUX_TUNNEL "MuxTunnel0" + +#define DECAP_DSCP_TO_DC_MAP "decap_dscp_to_tc_map" +#define DECAP_TC_TO_PG_MAP "decap_tc_to_pg_map" + +#define ENCAP_TC_TO_QUEUE_MAP "encap_tc_to_queue_map" +#define ENCAP_TC_TO_DSCP_MAP "encap_tc_to_dscp_map" + struct TunnelTermEntry { sai_object_id_t tunnel_term_id; - std::string ip_address; + std::string src_ip; + std::string dst_ip; + TunnelTermType term_type; }; struct TunnelEntry @@ -21,6 +39,8 @@ struct TunnelEntry sai_object_id_t overlay_intf_id; // overlay interface id swss::IpAddresses dst_ip_addrs; // destination ip 
addresses std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + sai_object_id_t tc_to_dscp_map_id; // map for re-writing DSCP value of tunnel traffic + sai_object_id_t tc_to_queue_map_id; // map for remapping tunnel traffic into another queue }; struct NexthopTunnel @@ -32,7 +52,10 @@ struct NexthopTunnel /* TunnelTable: key string, tunnel object id */ typedef std::map TunnelTable; -/* ExistingIps: ips that currently have term entries */ +/* + ExistingIps: ips that currently have term entries, + Key in ExistingIps is src_ip-dst_ip +*/ typedef std::unordered_set ExistingIps; /* Nexthop IP to refcount map */ @@ -48,7 +71,9 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); - swss::IpAddresses getDstIpAddresses(std::string tunnelKey); + swss::IpAddresses getDstIpAddresses(std::string tunnelKey) const; + sai_object_id_t getTCToDSCPMap(std::string tunnelKey) const; + sai_object_id_t getTCToQueueMap(std::string tunnelKey) const; private: TunnelTable tunnelTable; @@ -56,14 +81,15 @@ class TunnelDecapOrch : public Orch TunnelNhs tunnelNhs; bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, - std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl); + std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, TunnelTermType term_type, + sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id, sai_object_id_t tc_to_dscp_map_id, sai_object_id_t tc_to_queue_map_id); bool removeDecapTunnel(std::string key); - bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id); + bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, 
sai_object_id_t tunnel_id, TunnelTermType type); bool removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, std::string ip); bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); - bool setIpAttribute(std::string key, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id); + bool setIpAttribute(std::string key, swss::IpAddress src_ip, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id, TunnelTermType term_type); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); int incNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); From 094494bf03d2e2f107a17287073fecd34bbf4eb7 Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 28 Mar 2022 07:32:53 -0700 Subject: [PATCH 02/10] Address comments Signed-off-by: bingwang --- orchagent/muxorch.cpp | 58 ++++++++++++++++-- orchagent/muxorch.h | 8 +++ orchagent/qosorch.cpp | 91 +++++++++++++++++++++++++++- orchagent/qosorch.h | 11 ++++ orchagent/tunneldecaporch.cpp | 108 ++++++++++++---------------------- orchagent/tunneldecaporch.h | 7 --- 6 files changed, 201 insertions(+), 82 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 91c2e80821..63520d4b3d 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -23,6 +23,7 @@ #include "aclorch.h" #include "routeorch.h" #include "fdborch.h" +#include "qosorch.h" /* Global variables */ extern Directory gDirectory; @@ -81,6 +82,11 @@ const map muxStateStringToVal = { "pending", MuxState::MUX_STATE_PENDING }, }; +const map tunnel_qos_to_ref_table_map = { + {encap_tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, + {encap_tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME} +}; + static inline MuxStateChange mux_state_change (MuxState prev, MuxState curr) { auto key = std::make_pair(prev, curr); @@ -1177,7 +1183,9 @@ MuxOrch::MuxOrch(DBConnector *db, const std::vector &tables, Orch2(db, tables, request_), decap_orch_(decapOrch), 
neigh_orch_(neighOrch), - fdb_orch_(fdbOrch) + fdb_orch_(fdbOrch), + cfgTunnelTable_(db, CFG_TUNNEL_TABLE_NAME) + { handler_map_.insert(handler_pair(CFG_MUX_CABLE_TABLE_NAME, &MuxOrch::handleMuxCfg)); handler_map_.insert(handler_pair(CFG_PEER_SWITCH_TABLE_NAME, &MuxOrch::handlePeerSwitch)); @@ -1231,6 +1239,46 @@ bool MuxOrch::handleMuxCfg(const Request& request) return true; } +// Retrieve tc_to_queue_map and tc_to_dscp_map from CONFIG_DB, and +// resolve the ids from QosOrch +bool MuxOrch::resolveQosTableIds() +{ + std::vector field_value_tuples; + if (cfgTunnelTable_.get(MUX_TUNNEL, field_value_tuples)) + { + KeyOpFieldsValuesTuple tuple{"TUNNEL", MUX_TUNNEL, field_value_tuples}; + for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) + { + if (tunnel_qos_to_ref_table_map.find(fvField(*it)) != tunnel_qos_to_ref_table_map.end()) + { + sai_object_id_t id; + string object_name; + string &map_type_name = fvField(*it); + string &map_name = fvValue(*it); + ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, tunnel_qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + if (status == ref_resolve_status::success) + { + if (map_type_name == encap_tc_to_queue_field_name) + { + tc_to_queue_map_id_ = id; + } + else if (map_type_name == encap_tc_to_dscp_field_name) + { + tc_to_dscp_map_id_ = id; + } + SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str()); + } + } + } + return true; + } + else + { + SWSS_LOG_ERROR("Failed to read config from CONFIG_DB for %s", MUX_TUNNEL); + return false; + } +} + bool MuxOrch::handlePeerSwitch(const Request& request) { SWSS_LOG_ENTER(); @@ -1252,11 +1300,13 @@ bool MuxOrch::handlePeerSwitch(const Request& request) MUX_TUNNEL, peer_ip.to_string().c_str()); return false; } - sai_object_id_t tc_to_dscp_map_id = decap_orch_->getTCToDSCPMap(MUX_TUNNEL); - sai_object_id_t tc_to_queue_map_id = 
decap_orch_->getTCToQueueMap(MUX_TUNNEL); + if (!resolveQosTableIds()) + { + return false; + } auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id); + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_); SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 6e4f70408c..359d4837bb 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -29,6 +29,9 @@ enum MuxStateChange MUX_STATE_UNKNOWN_STATE }; +const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map"; +const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map"; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -196,8 +199,12 @@ class MuxOrch : public Orch2, public Observer, public Subject bool getMuxPort(const MacAddress&, const string&, string&); + bool resolveQosTableIds(); + IpAddress mux_peer_switch_ = 0x0; sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_queue_map_id_ = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_dscp_map_id_ = SAI_NULL_OBJECT_ID; MuxCableTb mux_cable_tb_; MuxTunnelNHs mux_tunnel_nh_; @@ -210,6 +217,7 @@ class MuxOrch : public Orch2, public Observer, public Subject FdbOrch *fdb_orch_; MuxCfgRequest request_; + Table cfgTunnelTable_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index b46a234056..31a6d76915 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -79,6 +79,7 @@ type_map QosOrch::m_qos_maps = { {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, {CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, {CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, + {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()} }; map 
qos_to_ref_table_map = { @@ -92,7 +93,9 @@ map qos_to_ref_table_map = { {scheduler_field_name, CFG_SCHEDULER_TABLE_NAME}, {wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME}, {dscp_to_fc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME}, - {exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME} + {exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME}, + {decap_dscp_to_tc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME}, + {decap_tc_to_pg_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME} }; #define DSCP_MAX_VAL 63 @@ -1066,6 +1069,84 @@ sai_object_id_t ExpToFcMapHandler::addQosItem(const vector &att return sai_object; } +bool TcToDscpMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, + vector &attributes) +{ + SWSS_LOG_ENTER(); + + sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); + + sai_attribute_t list_attr; + list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + list_attr.value.qosmap.count = (uint32_t)kfvFieldsValues(tuple).size(); + list_attr.value.qosmap.list = new sai_qos_map_t[list_attr.value.qosmap.count](); + uint32_t ind = 0; + + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++) + { + try + { + auto value = stoi(fvValue(*i)); + if (value < 0) + { + SWSS_LOG_ERROR("DSCP value %d is negative", value); + delete[] list_attr.value.qosmap.list; + return false; + } + else if (value > DSCP_MAX_VAL) + { + SWSS_LOG_ERROR("DSCP value %d is greater than max value %d", value, DSCP_MAX_VAL); + delete[] list_attr.value.qosmap.list; + return false; + } + list_attr.value.qosmap.list[ind].key.tc = static_cast(stoi(fvField(*i))); + list_attr.value.qosmap.list[ind].value.dscp = static_cast(value); + + SWSS_LOG_DEBUG("key.tc:%d, value.dscp:%d", + list_attr.value.qosmap.list[ind].key.tc, + list_attr.value.qosmap.list[ind].value.dscp); + } + catch(const invalid_argument& e) + { + SWSS_LOG_ERROR("Got exception during conversion: %s", e.what()); + delete[] list_attr.value.qosmap.list; + return false; + } + } + 
attributes.push_back(list_attr); + return true; +} + +sai_object_id_t TcToDscpMapHandler::addQosItem(const vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_status_t sai_status; + sai_object_id_t sai_object; + vector qos_map_attrs; + + sai_attribute_t qos_map_attr; + qos_map_attr.id = SAI_QOS_MAP_ATTR_TYPE; + qos_map_attr.value.u32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP; + qos_map_attrs.push_back(qos_map_attr); + + qos_map_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + qos_map_attr.value.qosmap.count = attributes[0].value.qosmap.count; + qos_map_attr.value.qosmap.list = attributes[0].value.qosmap.list; + qos_map_attrs.push_back(qos_map_attr); + + sai_status = sai_qos_map_api->create_qos_map(&sai_object, + gSwitchId, + (uint32_t)qos_map_attrs.size(), + qos_map_attrs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create tc_to_dscp map. status:%d", sai_status); + return SAI_NULL_OBJECT_ID; + } + SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); + return sai_object; +} + task_process_status QosOrch::handleExpToFcTable(Consumer& consumer) { SWSS_LOG_ENTER(); @@ -1080,6 +1161,13 @@ task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer) return pfc_to_queue_handler.processWorkItem(consumer); } +task_process_status QosOrch::handleTcToDscpTable(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + TcToDscpMapHandler tc_to_dscp_handler; + return tc_to_dscp_handler.processWorkItem(consumer); +} + QosOrch::QosOrch(DBConnector *db, vector &tableNames) : Orch(db, tableNames) { SWSS_LOG_ENTER(); @@ -1106,6 +1194,7 @@ void QosOrch::initTableHandlers() m_qos_handler_map.insert(qos_handler_pair(CFG_WRED_PROFILE_TABLE_NAME, &QosOrch::handleWredProfileTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_DSCP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleDscpToFcTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_EXP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleExpToFcTable)); + 
m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAMEļ¼Œ&QosOrch::handleTcToDscpTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable)); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 613bc7437e..0e433e9416 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -28,6 +28,8 @@ const string yellow_drop_probability_field_name = "yellow_drop_probability"; const string green_drop_probability_field_name = "green_drop_probability"; const string dscp_to_fc_field_name = "dscp_to_fc_map"; const string exp_to_fc_field_name = "exp_to_fc_map"; +const string decap_dscp_to_tc_field_name = "decap_dscp_to_tc_map"; +const string decap_tc_to_pg_field_name = "decap_tc_to_pg_map"; const string wred_profile_field_name = "wred_profile"; const string wred_red_enable_field_name = "wred_red_enable"; @@ -147,6 +149,14 @@ class ExpToFcMapHandler : public QosMapHandler sai_object_id_t addQosItem(const vector &attributes) override; }; +// Handler for TC_TO_DSCP_MAP +class TcToDscpMapHandler : public QosMapHandler +{ +public: + bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; + sai_object_id_t addQosItem(const vector &attributes) override; +}; + class QosOrch : public Orch { public: @@ -177,6 +187,7 @@ class QosOrch : public Orch task_process_status handleWredProfileTable(Consumer& consumer); task_process_status handleDscpToFcTable(Consumer& consumer); task_process_status handleExpToFcTable(Consumer& consumer); + task_process_status handleTcToDscpTable(Consumer& consumer); sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index a70b948999..b5ac43bddf 100644 --- a/orchagent/tunneldecaporch.cpp +++ 
b/orchagent/tunneldecaporch.cpp @@ -41,7 +41,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string op = kfvOp(t); IpAddresses dst_ip_addresses; - IpAddress src_ip("0.0.0.0"); + IpAddress src_ip_address("0.0.0.0"); IpAddress* p_src_ip = nullptr; string tunnel_type; string dscp_mode; @@ -50,9 +50,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ttl_mode; sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; - sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; - sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; - + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; bool valid = true; @@ -64,12 +62,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { tunnel_id = tunnelTable[key].tunnel_id; } - //Tunnel term type is set to P2P for mux tunnel to apply different configs - TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; - if (key == MUX_TUNNEL) - { - term_type = TUNNEL_TERM_TYPE_P2P; - } + if (op == SET_COMMAND) { @@ -97,13 +90,19 @@ void TunnelDecapOrch::doTask(Consumer& consumer) valid = false; break; } + if (exists) + { + setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id); + } } else if (fvField(i) == "src_ip") { try { - src_ip = IpAddress(fvValue(i)); - p_src_ip = &src_ip; + src_ip_address = IpAddress(fvValue(i)); + p_src_ip = &src_ip_address; + //Tunnel term type is set to P2P when source ip is present + term_type = TUNNEL_TERM_TYPE_P2P; } catch (const std::invalid_argument &e) { @@ -190,26 +189,14 @@ void TunnelDecapOrch::doTask(Consumer& consumer) setTunnelAttribute(fvField(i), tc_to_pg_map, tunnel_id); } } - else if (fvField(i) == ENCAP_TC_TO_DSCP_MAP) - { - // TODO: - } - else if (fvField(i) == ENCAP_TC_TO_QUEUE_MAP) - { - // TODO: - } } - if (valid) + //create new tunnel if it doesn't exists already + if (valid && !exists) { - if (exists) - { - // Update existing tunnel terms - setIpAttribute(key, src_ip, dst_ip_addresses, tunnel_id, term_type); - } 
- // create new tunnel if it doesn't exists already - else if (addDecapTunnel(key, tunnel_type, dst_ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, - dscp_to_dc_map_id, tc_to_pg_map_id, tc_to_dscp_map_id, tc_to_queue_map_id)) + + if (addDecapTunnel(key, tunnel_type, dst_ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, + dscp_to_dc_map_id, tc_to_pg_map_id)) { SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } @@ -250,8 +237,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) * @param[in] term_type - The type of tunnel term * @param[in] dscp_to_tc_map_id - Map ID for remapping DSCP to TC (decap) * @param[in] tc_to_pg_map_id - Map ID for remapping TC to PG (decap) - * @param[in] tc_to_dscp_map_id - Map ID for remapping TC to DSCP (encap) - * @param[in] tc_to_queue_map_id - Map ID for remapping TC to queue (encap) * * Return Values: * @return true on success and false if there's an error @@ -267,9 +252,7 @@ bool TunnelDecapOrch::addDecapTunnel( string ttl, TunnelTermType term_type, sai_object_id_t dscp_to_tc_map_id, - sai_object_id_t tc_to_pg_map_id, - sai_object_id_t tc_to_dscp_map_id, - sai_object_id_t tc_to_queue_map_id) + sai_object_id_t tc_to_pg_map_id) { SWSS_LOG_ENTER(); @@ -405,7 +388,7 @@ bool TunnelDecapOrch::addDecapTunnel( } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, tc_to_dscp_map_id, tc_to_queue_map_id }; + tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {} }; // create a decap tunnel entry for every source_ip - dest_ip pair if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) @@ -477,8 +460,17 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres { const IpAddress& ia = *it; string dst_ip = ia.to_string(); - // Key is src_ip-dst_ip, 10.1.1.1-20.2.2.2 - string key = src_ip.to_string + '-' + dst_ip; + // The key will be src_ip-dst_ip (like 10.1.1.1-20.2.2.2) if src_ip is not 0, + // or the key will contain dst_ip only + string key; + if 
(!src_ip.isZero()) + { + key = src_ip.to_string + '-' + dst_ip; + } + else + { + key = dst_ip; + } // check if the there's an entry already for the key pair if (existingIps.find(key) != existingIps.end()) @@ -624,15 +616,13 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ * * Arguments: * @param[in] key - key of the tunnel from APP_DB - * @param[in] src_ip - source IP for tunnel term - * @param[in] new_dst_ip_addresses - new destination ip addresses to decap (comes from APP_DB) + * @param[in] new_ip_addresses - new destination ip addresses to decap (comes from APP_DB) * @param[in] tunnel_id - the id of the tunnel - * @param[in] term_type - The type of tunnel term * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::setIpAttribute(string key, IpAddress src_ip, IpAddresses new_dst_ip_addresses, sai_object_id_t tunnel_id, TunnelTermType term_type) +bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, sai_object_id_t tunnel_id) { TunnelEntry *tunnel_info = &tunnelTable.find(key)->second; @@ -640,16 +630,16 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddress src_ip, IpAddresses n vector tunnel_term_info_copy(tunnel_info->tunnel_term_info); tunnel_info->tunnel_term_info.clear(); - tunnel_info->dst_ip_addrs = new_dst_ip_addresses; - string src_ip = src_ip.to_string(); + tunnel_info->dst_ip_addrs = new_ip_addresses; + // loop through original ips and remove ips not in the new ip_addresses for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string key = src_ip + '-' + tunnel_entry_info.dst_ip; - if (!new_dst_ip_addresses.contains(key)) + string ip = tunnel_entry_info.ip_address; + if (!new_ip_addresses.contains(ip)) { - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, key)) + if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) { return false; 
} @@ -657,12 +647,12 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddress src_ip, IpAddresses n else { // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, src_ip, dst_ip, term_type }); + tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, ip }); } } // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, src_ip, new_dst_ip_addresses, tunnel_id, term_type)) + if(!addDecapTunnelTermEntries(key, IPAddress('0.0.0.0'), new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) { return false; } @@ -920,25 +910,3 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) const return tunnelTable[tunnelKey].dst_ip_addrs; } - -sai_object_id_t TunnelDecapOrch::getTCToDSCPMap(std::string tunnelKey) const -{ - auto iter = tunnelTable.find(tunnelKey); - if (iter == tunnelTable.end()) - { - SWSS_LOG_ERROR("Tunnel config not found %s", tunnelKey.c_str()); - return SAI_NULL_OBJECT_ID; - } - return iter->second.tc_to_dscp_map_id; -} - -sai_object_id_t TunnelDecapOrch::getTCToQueueMap(std::string tunnelKey) const -{ - auto iter = tunnelTable.find(tunnelKey); - if (iter == tunnelTable.end()) - { - SWSS_LOG_ERROR("Tunnel config not found %s", tunnelKey.c_str()); - return SAI_NULL_OBJECT_ID; - } - return iter->second.tc_to_queue_map_id; -} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 6e4987d8c6..5457b79934 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -22,9 +22,6 @@ enum TunnelTermType #define DECAP_DSCP_TO_DC_MAP "decap_dscp_to_tc_map" #define DECAP_TC_TO_PG_MAP "decap_tc_to_pg_map" -#define ENCAP_TC_TO_QUEUE_MAP "encap_tc_to_queue_map" -#define ENCAP_TC_TO_DSCP_MAP "encap_tc_to_dscp_map" - struct TunnelTermEntry { sai_object_id_t tunnel_term_id; @@ -39,8 +36,6 @@ struct TunnelEntry sai_object_id_t overlay_intf_id; // overlay interface id swss::IpAddresses dst_ip_addrs; // destination ip addresses 
std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) - sai_object_id_t tc_to_dscp_map_id; // map for re-writing DSCP value of tunnel traffic - sai_object_id_t tc_to_queue_map_id; // map for remapping tunnel traffic into another queue }; struct NexthopTunnel @@ -72,8 +67,6 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); swss::IpAddresses getDstIpAddresses(std::string tunnelKey) const; - sai_object_id_t getTCToDSCPMap(std::string tunnelKey) const; - sai_object_id_t getTCToQueueMap(std::string tunnelKey) const; private: TunnelTable tunnelTable; From 19f120a6f0c2e28d1453ea3924bfea91f224549a Mon Sep 17 00:00:00 2001 From: bingwang Date: Sat, 2 Apr 2022 04:37:05 -0700 Subject: [PATCH 03/10] Read all tables Signed-off-by: bingwang --- orchagent/muxorch.cpp | 10 ++---- orchagent/qosorch.cpp | 60 +++++++++++++++++++++++------------ orchagent/qosorch.h | 7 +++- orchagent/tunneldecaporch.cpp | 51 +++++++++++++++++++++++------ orchagent/tunneldecaporch.h | 6 ++-- 5 files changed, 92 insertions(+), 42 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 63520d4b3d..c599167ca1 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -82,11 +82,6 @@ const map muxStateStringToVal = { "pending", MuxState::MUX_STATE_PENDING }, }; -const map tunnel_qos_to_ref_table_map = { - {encap_tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, - {encap_tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME} -}; - static inline MuxStateChange mux_state_change (MuxState prev, MuxState curr) { auto key = std::make_pair(prev, curr); @@ -1249,13 +1244,13 @@ bool MuxOrch::resolveQosTableIds() KeyOpFieldsValuesTuple tuple{"TUNNEL", MUX_TUNNEL, field_value_tuples}; for (auto it = kfvFieldsValues(tuple).begin(); it != 
kfvFieldsValues(tuple).end(); it++) { - if (tunnel_qos_to_ref_table_map.find(fvField(*it)) != tunnel_qos_to_ref_table_map.end()) + if (qos_to_ref_table_map.find(fvField(*it)) != qos_to_ref_table_map.end()) { sai_object_id_t id; string object_name; string &map_type_name = fvField(*it); string &map_name = fvValue(*it); - ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, tunnel_qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status == ref_resolve_status::success) { if (map_type_name == encap_tc_to_queue_field_name) @@ -1266,6 +1261,7 @@ bool MuxOrch::resolveQosTableIds() { tc_to_dscp_map_id_ = id; } + setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, MUX_TUNNEL, map_type_name, object_name); SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str()); } } diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 31a6d76915..3307d2ef0d 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -94,28 +94,46 @@ map qos_to_ref_table_map = { {wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME}, {dscp_to_fc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME}, {exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME}, - {decap_dscp_to_tc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME}, - {decap_tc_to_pg_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME} + {decap_dscp_to_tc_field_name, CFG_DSCP_TO_TC_MAP_TABLE_NAME}, + {decap_tc_to_pg_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {encap_tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, + {encap_tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME} }; #define DSCP_MAX_VAL 63 #define EXP_MAX_VAL 7 -task_process_status QosMapHandler::processWorkItem(Consumer& consumer) +task_process_status QosMapHandler::processAllWorkItem(Consumer& 
consumer) { SWSS_LOG_ENTER(); - sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; + bool first_one = true; + task_process_status status = task_process_status::task_success; /* - ToDo: As we are going to have more than one QosMap in type DSCP_TO_TC_MAP, TC_TO_PRIORITY_GROUP_MAP and TC_TO_QUEUE_MAP, - we need some mechanism to which one is switch level, and which one is for tunnel use only. - Two options now: - 1. Hardcode the switch level map name - 2. Always use the first map as switch level QoS map + we always use the first map as switch level (port level) QoS map */ auto it = consumer.m_toSync.begin(); - KeyOpFieldsValuesTuple tuple = it->second; + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple &tuple = it->second; + status = processWorkItem(tuple, first_one); + if (status != task_process_status::task_success) + { + // Stop parsing if any error seen + break; + } + first_one = false; + it = consumer.m_toSync.erase(it); + } + return status; +} + +task_process_status QosMapHandler::processWorkItem(KeyOpFieldsValuesTuple& tuple, bool is_switch_level) +{ + + + sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; string qos_object_name = kfvKey(tuple); string qos_map_type_name = consumer.getTableName(); string op = kfvOp(tuple); @@ -336,7 +354,7 @@ task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer) { SWSS_LOG_ENTER(); DscpToTcMapHandler dscp_tc_handler; - return dscp_tc_handler.processWorkItem(consumer); + return dscp_tc_handler.processAllWorkItem(consumer); } bool MplsTcToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -391,7 +409,7 @@ task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer) { SWSS_LOG_ENTER(); MplsTcToTcMapHandler mpls_tc_to_tc_handler; - return mpls_tc_to_tc_handler.processWorkItem(consumer); + return mpls_tc_to_tc_handler.processAllWorkItem(consumer); } bool Dot1pToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector 
&attributes) @@ -460,7 +478,7 @@ task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer) { SWSS_LOG_ENTER(); Dot1pToTcMapHandler dot1p_tc_handler; - return dot1p_tc_handler.processWorkItem(consumer); + return dot1p_tc_handler.processAllWorkItem(consumer); } bool TcToQueueMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -513,7 +531,7 @@ task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer) { SWSS_LOG_ENTER(); TcToQueueMapHandler tc_queue_handler; - return tc_queue_handler.processWorkItem(consumer); + return tc_queue_handler.processAllWorkItem(consumer); } void WredMapHandler::freeAttribResources(vector &attributes) @@ -734,7 +752,7 @@ task_process_status QosOrch::handleWredProfileTable(Consumer& consumer) { SWSS_LOG_ENTER(); WredMapHandler wred_handler; - return wred_handler.processWorkItem(consumer); + return wred_handler.processAllWorkItem(consumer); } bool TcToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -787,7 +805,7 @@ task_process_status QosOrch::handleTcToPgTable(Consumer& consumer) { SWSS_LOG_ENTER(); TcToPgHandler tc_to_pg_handler; - return tc_to_pg_handler.processWorkItem(consumer); + return tc_to_pg_handler.processAllWorkItem(consumer); } bool PfcPrioToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -841,7 +859,7 @@ task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer) { SWSS_LOG_ENTER(); PfcPrioToPgHandler pfc_prio_to_pg_handler; - return pfc_prio_to_pg_handler.processWorkItem(consumer); + return pfc_prio_to_pg_handler.processAllWorkItem(consumer); } bool PfcToQueueHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -982,7 +1000,7 @@ task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer) { SWSS_LOG_ENTER(); DscpToFcMapHandler dscp_fc_handler; - return dscp_fc_handler.processWorkItem(consumer); + return 
dscp_fc_handler.processAllWorkItem(consumer); } bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, @@ -1151,21 +1169,21 @@ task_process_status QosOrch::handleExpToFcTable(Consumer& consumer) { SWSS_LOG_ENTER(); ExpToFcMapHandler exp_fc_handler; - return exp_fc_handler.processWorkItem(consumer); + return exp_fc_handler.processAllWorkItem(consumer); } task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer) { SWSS_LOG_ENTER(); PfcToQueueHandler pfc_to_queue_handler; - return pfc_to_queue_handler.processWorkItem(consumer); + return pfc_to_queue_handler.processAllWorkItem(consumer); } task_process_status QosOrch::handleTcToDscpTable(Consumer& consumer) { SWSS_LOG_ENTER(); TcToDscpMapHandler tc_to_dscp_handler; - return tc_to_dscp_handler.processWorkItem(consumer); + return tc_to_dscp_handler.processAllWorkItem(consumer); } QosOrch::QosOrch(DBConnector *db, vector &tableNames) : Orch(db, tableNames) diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 0e433e9416..ba5c037da0 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -30,6 +30,8 @@ const string dscp_to_fc_field_name = "dscp_to_fc_map"; const string exp_to_fc_field_name = "exp_to_fc_map"; const string decap_dscp_to_tc_field_name = "decap_dscp_to_tc_map"; const string decap_tc_to_pg_field_name = "decap_tc_to_pg_map"; +const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map"; +const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map"; const string wred_profile_field_name = "wred_profile"; const string wred_red_enable_field_name = "wred_red_enable"; @@ -58,10 +60,13 @@ const string ecn_green_red = "ecn_green_red"; const string ecn_green_yellow = "ecn_green_yellow"; const string ecn_all = "ecn_all"; +// Declaration for being referenced in muxorch and decaporch +extern std::map qos_to_ref_table_map; class QosMapHandler { public: - task_process_status processWorkItem(Consumer& consumer); + task_process_status 
processWorkItem(KeyOpFieldsValuesTuple& tuple, bool is_switch_level); + task_process_status processAllWorkItem(Consumer& consumer); virtual bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) = 0; virtual void freeAttribResources(vector &attributes); virtual bool modifyQosItem(sai_object_id_t, vector &attributes); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index b5ac43bddf..8daef1128e 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -5,6 +5,7 @@ #include "crmorch.h" #include "logger.h" #include "swssnet.h" +#include "qosorch.h" #define OVERLAY_RIF_DEFAULT_MTU 9100 @@ -171,22 +172,20 @@ void TunnelDecapOrch::doTask(Consumer& consumer) setTunnelAttribute(fvField(i), ttl_mode, tunnel_id); } } - else if (fvField(i) == DECAP_DSCP_TO_DC_MAP) + else if (fvField(i) == decap_dscp_to_tc_field_name) { - string dscp_to_dc_map_name = fvValue(i); - // TODO: Validate DSCP_TO_TC_MAP map name, and get map id - if (exists) + dscp_to_dc_map_id = resolveQosMapId(key, decap_dscp_to_tc_field_name, t); + if (exists && dscp_to_dc_map_id != SAI_NULL_OBJECT_ID) { - setTunnelAttribute(fvField(i), dscp_to_dc_map, tunnel_id); + setTunnelAttribute(fvField(i), dscp_to_dc_map_id, tunnel_id); } } - else if (fvField(i) == DECAP_TC_TO_PG_MAP) + else if (fvField(i) == decap_tc_to_pg_field_name) { - string tc_to_pg_map_name = fvValue(i); - // TODO: Validate TC_TO_PG_MAP name, and get map id - if (exists) + tc_to_pg_map_id = resolveQosMapId(key, decap_tc_to_pg_field_name, t); + if (exists && tc_to_pg_map_id != SAI_NULL_OBJECT_ID) { - setTunnelAttribute(fvField(i), tc_to_pg_map, tunnel_id); + setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); } } } @@ -910,3 +909,35 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) const return tunnelTable[tunnelKey].dst_ip_addrs; } + +/** + * Function Description: + * @brief Resolve the map id from QosOrch + * + * Arguments: + * @param[in] 
tunnle_name - The name of tunnel + * @param[in] map_type_name - The type of referenced QoS map + * @param[in] tuple - The KeyOpFieldsValuesTuple that contains keys - values + * + * Return Values: + * @return The sai_object_id of referenced map, or SAI_NULL_OBJECT_ID if there's an error + */ +sai_object_id_t TunnelDecapOrch::resolveQosMapId(std::string tunnle_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple) +{ + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + if (status == ref_resolve_status::success) + { + + setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, tunnle_name, map_type_name, object_name); + SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", tunnle_name.c_str(), map_type_name.c_str(), map_name.c_str()); + return id; + } + else + { + SWSS_LOG_ERROR("Failed to resolce QoS map for tunnel %s type %s", tunnle_name.c_str(), map_type_name.c_str()); + return SAI_NULL_OBJECT_ID; + } + +} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 5457b79934..8efae14b46 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -19,8 +19,6 @@ enum TunnelTermType /* Constants */ #define MUX_TUNNEL "MuxTunnel0" -#define DECAP_DSCP_TO_DC_MAP "decap_dscp_to_tc_map" -#define DECAP_TC_TO_PG_MAP "decap_tc_to_pg_map" struct TunnelTermEntry { @@ -75,7 +73,7 @@ class TunnelDecapOrch : public Orch bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, TunnelTermType term_type, - sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id, sai_object_id_t tc_to_dscp_map_id, sai_object_id_t tc_to_queue_map_id); + sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id); bool removeDecapTunnel(std::string key); 
bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType type); @@ -89,5 +87,7 @@ class TunnelDecapOrch : public Orch int decNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); void doTask(Consumer& consumer); + + sai_object_id_t resolveQosMapId(std::string tunnle_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple); }; #endif From 6168628b23276f35266043ee1873fc09647d8d48 Mon Sep 17 00:00:00 2001 From: bingwang Date: Fri, 8 Apr 2022 06:00:45 +0000 Subject: [PATCH 04/10] Build pass Signed-off-by: bingwang --- orchagent/muxorch.h | 3 -- orchagent/qosorch.cpp | 10 +++--- orchagent/qosorch.h | 2 +- orchagent/tunneldecaporch.cpp | 61 ++++++++++++++++++++++++++--------- orchagent/tunneldecaporch.h | 7 ++-- 5 files changed, 55 insertions(+), 28 deletions(-) diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 359d4837bb..6288bca0c6 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -29,9 +29,6 @@ enum MuxStateChange MUX_STATE_UNKNOWN_STATE }; -const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map"; -const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map"; - // Forward Declarations class MuxOrch; class MuxCableOrch; diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 3307d2ef0d..26446de804 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -113,11 +113,12 @@ task_process_status QosMapHandler::processAllWorkItem(Consumer& consumer) As we are going to have more than one QosMap in type DSCP_TO_TC_MAP, TC_TO_PRIORITY_GROUP_MAP and TC_TO_QUEUE_MAP, we always use the first map as switch level (port level) QoS map */ + string qos_map_type_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple &tuple = it->second; - status = processWorkItem(tuple, first_one); + status = processWorkItem(qos_map_type_name, tuple, first_one); if 
(status != task_process_status::task_success) { // Stop parsing if any error seen @@ -129,13 +130,12 @@ task_process_status QosMapHandler::processAllWorkItem(Consumer& consumer) return status; } -task_process_status QosMapHandler::processWorkItem(KeyOpFieldsValuesTuple& tuple, bool is_switch_level) +task_process_status QosMapHandler::processWorkItem(string qos_map_type_name, KeyOpFieldsValuesTuple& tuple, bool is_switch_level) { sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; string qos_object_name = kfvKey(tuple); - string qos_map_type_name = consumer.getTableName(); string op = kfvOp(tuple); if (QosOrch::getTypeMap()[qos_map_type_name]->find(qos_object_name) != QosOrch::getTypeMap()[qos_map_type_name]->end()) @@ -1092,8 +1092,6 @@ bool TcToDscpMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & { SWSS_LOG_ENTER(); - sai_uint8_t max_num_fcs = NhgMapOrch::getMaxNumFcs(); - sai_attribute_t list_attr; list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; list_attr.value.qosmap.count = (uint32_t)kfvFieldsValues(tuple).size(); @@ -1212,7 +1210,7 @@ void QosOrch::initTableHandlers() m_qos_handler_map.insert(qos_handler_pair(CFG_WRED_PROFILE_TABLE_NAME, &QosOrch::handleWredProfileTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_DSCP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleDscpToFcTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_EXP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleExpToFcTable)); - m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAMEļ¼Œ&QosOrch::handleTcToDscpTable)); + m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAME, &QosOrch::handleTcToDscpTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable)); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index ba5c037da0..60ec8063d9 100644 --- 
a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -65,7 +65,7 @@ extern std::map qos_to_ref_table_map; class QosMapHandler { public: - task_process_status processWorkItem(KeyOpFieldsValuesTuple& tuple, bool is_switch_level); + task_process_status processWorkItem(std::string qos_map_type, KeyOpFieldsValuesTuple& tuple, bool is_switch_level); task_process_status processAllWorkItem(Consumer& consumer); virtual bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) = 0; virtual void freeAttribResources(vector &attributes); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 8daef1128e..7fa3af0acf 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -93,7 +93,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id); + setIpAttribute(key, dst_ip_addresses, tunnelTable.find(key)->second.tunnel_id); } } else if (fvField(i) == "src_ip") @@ -195,7 +195,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { if (addDecapTunnel(key, tunnel_type, dst_ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, - dscp_to_dc_map_id, tc_to_pg_map_id)) + term_type, dscp_to_dc_map_id, tc_to_pg_map_id)) { SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } @@ -464,7 +464,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres string key; if (!src_ip.isZero()) { - key = src_ip.to_string + '-' + dst_ip; + key = src_ip.to_string() + '-' + dst_ip; } else { @@ -499,7 +499,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres existingIps.insert(key); // insert entry id and ip into tunnel mapping - tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip, dst_ip, tunnel_type }); + tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip.to_string(), dst_ip, tunnel_type }); // pop the last element for the 
next loop tunnel_table_entry_attrs.pop_back(); @@ -580,32 +580,63 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ } } - if (field == DECAP_DSCP_TO_DC_MAP) + sai_status_t status = sai_tunnel_api->set_tunnel_attribute(existing_tunnel_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set attribute %s with value %s\n", field.c_str(), value.c_str()); + task_process_status handle_status = handleSaiSetStatus(SAI_API_TUNNEL, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set attribute %s with value %s\n", field.c_str(), value.c_str()); + return true; +} + +/** + * Function Description: + * @brief sets attributes for a tunnel (decap_dscp_to_tc_map and decap_tc_to_pg_map are supported) + * + * Arguments: + * @param[in] field - field to set the attribute for + * @param[in] value - value to set the attribute to (sai_object_id) + * @param[in] existing_tunnel_id - the id of the tunnel you want to set the attribute for + * + * Return Values: + * @return true on success and false if there's an error + */ +bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id) +{ + + sai_attribute_t attr; + + if (field == "decap_dscp_to_tc_map") + { + // TC remapping. 
attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; - // TODO: Fill value of DSCP_TO_TC_MAP + attr.value.oid = value; } - if (field == DECAP_TC_TO_PG_MAP) + if (field == "decap_tc_to_pg_map") { // TC to PG remapping attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; - // TODO: Fill value of TC_TO_PRIORITY_GROUP_MAP + attr.value.oid = value; } sai_status_t status = sai_tunnel_api->set_tunnel_attribute(existing_tunnel_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set attribute %s with value %s\n", field.c_str(), value.c_str()); + SWSS_LOG_ERROR("Failed to set attribute %s with value %" PRIu64, field.c_str(), value); task_process_status handle_status = handleSaiSetStatus(SAI_API_TUNNEL, status); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } - SWSS_LOG_NOTICE("Set attribute %s with value %s\n", field.c_str(), value.c_str()); + SWSS_LOG_NOTICE("Set attribute %s with value %" PRIu64, field.c_str(), value); return true; } @@ -635,7 +666,7 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string ip = tunnel_entry_info.ip_address; + string ip = tunnel_entry_info.dst_ip; if (!new_ip_addresses.contains(ip)) { if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) @@ -646,12 +677,12 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s else { // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, "0.0.0.0", ip, TUNNEL_TERM_TYPE_P2MP }); } } // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, IPAddress('0.0.0.0'), new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) + if(!addDecapTunnelTermEntries(key, IpAddress(0), 
new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) { return false; } @@ -899,7 +930,7 @@ bool TunnelDecapOrch::removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAd return true; } -IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) const +IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) { if (tunnelTable.find(tunnelKey) == tunnelTable.end()) { @@ -931,7 +962,7 @@ sai_object_id_t TunnelDecapOrch::resolveQosMapId(std::string tunnle_name, std::s { setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, tunnle_name, map_type_name, object_name); - SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", tunnle_name.c_str(), map_type_name.c_str(), map_name.c_str()); + SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", tunnle_name.c_str(), map_type_name.c_str(), object_name.c_str()); return id; } else diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 8efae14b46..d286bfc0d7 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -64,7 +64,7 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); - swss::IpAddresses getDstIpAddresses(std::string tunnelKey) const; + swss::IpAddresses getDstIpAddresses(std::string tunnelKey); private: TunnelTable tunnelTable; @@ -80,7 +80,8 @@ class TunnelDecapOrch : public Orch bool removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, std::string ip); bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); - bool setIpAttribute(std::string key, swss::IpAddress src_ip, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id, TunnelTermType term_type); + bool setTunnelAttribute(std::string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id); + bool setIpAttribute(std::string key, swss::IpAddresses 
new_ip_addresses, sai_object_id_t tunnel_id); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); int incNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); @@ -88,6 +89,6 @@ class TunnelDecapOrch : public Orch void doTask(Consumer& consumer); - sai_object_id_t resolveQosMapId(std::string tunnle_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple); + sai_object_id_t resolveQosMapId(std::string tunnle_name, std::string map_type_name, swss::KeyOpFieldsValuesTuple& tuple); }; #endif From dbf28d3fe2c44a6895d07d204214ea5e24a22f05 Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 11 Apr 2022 09:33:24 +0000 Subject: [PATCH 05/10] Update vs test Signed-off-by: bingwang --- tests/test_mux.py | 58 +++++++++++++++++++++++++---- tests/test_tunnel.py | 88 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 131 insertions(+), 15 deletions(-) diff --git a/tests/test_mux.py b/tests/test_mux.py index 20ac1832c1..0bb39bc059 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -21,6 +21,7 @@ class TestMuxTunnelBase(object): ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" CONFIG_MUX_CABLE = "MUX_CABLE" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" SERV1_IPV4 = "192.168.0.100" SERV1_IPV6 = "fc02:1000::100" @@ -46,6 +47,8 @@ class TestMuxTunnelBase(object): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + TC_TO_DSCP_MAP = {str(i):str{i} for i in range(0, 8)} + TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} def create_vlan_interface(self, confdb, asicdb, dvs): @@ -513,7 +516,7 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): return True - def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): + def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dscp_map_oid=None, tc_to_queue_map_oid=None): """ Create PEER entry verify all needed enties in ASIC DB exists """ peer_attrs = { @@ -541,6 +544,11 @@ def 
create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): assert p2p_obj != None fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, p2p_obj) + + if tc_to_dscp_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs + if tc_to_queue_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP" in fvs for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": @@ -559,12 +567,17 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): assert value == "SAI_TUNNEL_TTL_MODE_PIPE_MODEL" elif field == "SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION": assert value == "SAI_PACKET_ACTION_DROP" + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP": + assert value == tc_to_dscp_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP": + assert value == tc_to_queue_map_oid else: assert False, "Field %s is not tested" % field - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" for term_entry in tunnel_term_entries: fvs = asicdb.get_entry(self.ASIC_TUNNEL_TERM_ENTRIES, term_entry) @@ -575,13 +588,15 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == 
"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field @@ -633,8 +648,8 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) else: assert False, "Field %s is not tested" % field - - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) + src_ip = kwargs['src_ip'] if 'src_ip' in kwargs else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): @@ -663,6 +678,25 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) + # Apply QoS map to config db + table = swsscommon.Table(configdb, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + fvs_in_asicdb = asicdb.get_entry(self.ASIC_QOS_MAP_TABLE_KEY, oid) + assert(fvs_in_asicdb["SAI_QOS_MAP_ATTR_TYPE"] == qos_map_type_name) + return oid + + def remove_qos_map(self, configdb, qos_map_oid): + """ Remove the testing qos map""" + table = swsscommon.Table(configdb, qos_map_type_name) + table._del(qos_map_oid) def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -693,8 +727,10 @@ def test_Tunnel(self, dvs, testlog): # create tunnel IPv4 tunnel self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - 
dst_ip="10.1.0.32", dscp_mode="uniform", - ecn_mode="standard", ttl_mode="pipe") + dst_ip="10.1.0.32", dscp_mode="pipe", + ecn_mode="standard", ttl_mode="pipe", + encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, + encap_tc_color_to_dscp_map=self.TUNNEL_QOS_MAP_NAME) def test_Peer(self, dvs, testlog): @@ -702,8 +738,14 @@ def test_Peer(self, dvs, testlog): db = dvs.get_config_db() asicdb = dvs.get_asic_db() + + tc_to_dscp_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", tc_to_dscp_map_oid, tc_to_queue_map_oid) - self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32") + self.remove_qos_map(configdb, tc_to_dscp_map_oid) + self.remove_qos_map(tc_to_queue_map_oid) def test_Neighbor(self, dvs, dvs_route, testlog): diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index b69e6b6b73..95f6c611cb 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -7,13 +7,14 @@ def create_fvs(**kwargs): return swsscommon.FieldValuePairs(list(kwargs.items())) - class TestTunnelBase(object): APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -30,6 +31,9 @@ class TestTunnelBase(object): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + # Define 2 dummy maps + DSCP_TO_TC_MAP = {str(i):str{1} for i in range(0, 64)} + 
TC_TO_PRIORITY_GROUP_MAP = {str{i}:str{i} for i in range(0, 8)} def check_interface_exists_in_asicdb(self, asicdb, sai_oid): if_table = swsscommon.Table(asicdb, self.ASIC_RIF_TABLE) @@ -41,12 +45,13 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): status, fvs = vfr_table.get(sai_oid) return status - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) tunnel_term_entries = tunnel_term_table.getKeys() assert len(tunnel_term_entries) == len(dst_ips) + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" for term_entry in tunnel_term_entries: status, fvs = tunnel_term_table.get(term_entry) @@ -57,20 +62,31 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; + is_symmetric_tunnel = "src_ip" in kwargs + + decap_dscp_to_tc_map_oid = kwargs["decap_dscp_to_tc_map_oid"] if "decap_dscp_to_tc_map_oid" in 
kwargs else None + decap_tc_to_pg_map_oid = kwargs["decap_tc_to_pg_map_oid"] if "decap_tc_to_pg_map_oid" in kwargs else None + + if "decap_dscp_to_tc_map_oid" in kwargs: + kwargs.pop("decap_dscp_to_tc_map_oid") + + if "decap_tc_to_pg_map_oid" in kwargs: + kwargs.pop("decap_tc_to_pg_map_oid") # create tunnel entry in DB ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) @@ -100,7 +116,12 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] - + + if decap_dscp_to_tc_map_oid: + assert "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP" in fvs + if decap_tc_to_pg_map_oid: + assert "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP" in fvs + for field, value in fvs: if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" @@ -116,10 +137,14 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == dscp_to_tc_map_oid + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value = tc_to_pg_map_oid else: assert False, "Field %s is not tested" % field - - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) + src_ip = kwargs["src_ip"] if "src_ip" in kwargs else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -147,6 +172,26 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 
0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + """ Add qos map for testing""" + current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) + # Apply QoS map to config db + table = swsscommon.Table(configdb, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + fvs_in_asicdb = asicdb.get_entry(self.ASIC_QOS_MAP_TABLE_KEY, oid) + assert(fvs_in_asicdb["SAI_QOS_MAP_ATTR_TYPE"] == qos_map_type_name) + return oid + + def remove_qos_map(self, configdb, qos_map_oid): + """ Remove the testing qos map""" + table = swsscommon.Table(configdb, qos_map_type_name) + table._del(qos_map_oid) def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -194,6 +239,35 @@ def test_TunnelDecap_v6(self, dvs, testlog): dst_ip="2::2,3::3", dscp_mode="pipe", ecn_mode="copy_from_outer", ttl_mode="uniform") self.remove_and_test_tunnel(db, asicdb,"IPINIPv6Decap") + + def test_TunnelDecap_MuxTunnel(self, dvs, testlog): + """ Test MuxTunnel creation. 
""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, asicdb) + + dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + # Create MuxTunnel0 with QoS remapping attributes + params = { + "tunnel_type": "IPINIP", + "src_ip": "1.1.1.1", + "dst_ip": "1.1.1.2", + "dscp_mode": "pipe", + "ecn_mode": "copy_from_outer", + "ttl_mode": "uniform", + "decap_dscp_to_tc_map": "AZURE_TUNNEL", + "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, + "decap_tc_to_pg_map": "AZURE_TUNNEL" + "decap_tc_to_pg_map_oid": tc_to_pg_map_oid + } + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) + + self.remove_qos_map(configdb, dscp_to_tc_map_oid) + self.remove_qos_map(configdb, tc_to_pg_map_oid) class TestSymmetricTunnel(TestTunnelBase): From 2af94da62a4e604f6ae6a9fe0b3a48b25181ab3a Mon Sep 17 00:00:00 2001 From: bingwang Date: Tue, 12 Apr 2022 08:27:44 +0000 Subject: [PATCH 06/10] Add vs test cases Signed-off-by: bingwang --- orchagent/muxorch.cpp | 27 ++++++++++++---- orchagent/orchdaemon.cpp | 3 +- orchagent/qosorch.cpp | 3 +- tests/test_mux.py | 61 ++++++++++++++++++------------------ tests/test_tunnel.py | 67 +++++++++++++++++++++++----------------- 5 files changed, 94 insertions(+), 67 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index c599167ca1..d7dc775706 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -166,7 +166,8 @@ static sai_object_id_t create_tunnel( const IpAddress* p_dst_ip, const IpAddress* p_src_ip, sai_object_id_t tc_to_dscp_map_id, - sai_object_id_t 
tc_to_queue_map_id) + sai_object_id_t tc_to_queue_map_id, + string dscp_mode_name) { sai_status_t status; @@ -210,10 +211,17 @@ static sai_object_id_t create_tunnel( attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); - // Set DSCP mode to PIPE to ensure that outer DSCP is independent of inner DSCP - // and inner DSCP is unchanged at decap + sai_tunnel_dscp_mode_t dscp_mode; + if (dscp_mode_name == "uniform") + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; + } + else + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + } attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; - attr.value.s32 = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + attr.value.s32 = dscp_mode; tunnel_attrs.push_back(attr); attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; @@ -239,6 +247,7 @@ static sai_object_id_t create_tunnel( { attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP; attr.value.oid = tc_to_dscp_map_id; + tunnel_attrs.push_back(attr); } // TC remapping @@ -246,6 +255,7 @@ static sai_object_id_t create_tunnel( { attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP; attr.value.oid = tc_to_queue_map_id; + tunnel_attrs.push_back(attr); } sai_object_id_t tunnel_id; @@ -1262,7 +1272,7 @@ bool MuxOrch::resolveQosTableIds() tc_to_dscp_map_id_ = id; } setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, MUX_TUNNEL, map_type_name, object_name); - SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str()); + SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str()); } } } @@ -1302,7 +1312,12 @@ bool MuxOrch::handlePeerSwitch(const Request& request) } auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_); + + // Read dscp_mode of MuxTunnel0 from config_db + string dscp_mode_name = "pipe"; + cfgTunnelTable_.hget(MUX_TUNNEL, 
"dscp_mode", dscp_mode_name); + + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_, dscp_mode_name); SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index a6563d45ca..0f4436ddf8 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -220,7 +220,8 @@ bool OrchDaemon::init() CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, - CFG_EXP_TO_FC_MAP_TABLE_NAME + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; gQosOrch = new QosOrch(m_configDb, qos_tables); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 96cbd7c284..8f514d45cf 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -79,7 +79,8 @@ type_map QosOrch::m_qos_maps = { {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, {CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, {CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()} + {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()}, + {CFG_TUNNEL_TABLE_NAME, new object_reference_map()} }; map qos_to_ref_table_map = { diff --git a/tests/test_mux.py b/tests/test_mux.py index 0bb39bc059..dfe160d287 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -21,6 +21,8 @@ class TestMuxTunnelBase(object): ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" CONFIG_MUX_CABLE = "MUX_CABLE" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" SERV1_IPV4 = "192.168.0.100" @@ -31,7 +33,7 @@ class TestMuxTunnelBase(object): IPV6_MASK = "/128" TUNNEL_NH_ID = 0 ACL_PRIORITY = "999" - + ecn_modes_map = { "standard" : 
"SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" @@ -47,7 +49,7 @@ class TestMuxTunnelBase(object): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } - TC_TO_DSCP_MAP = {str(i):str{i} for i in range(0, 8)} + TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} def create_vlan_interface(self, confdb, asicdb, dvs): @@ -147,7 +149,7 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): for k in keys: fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg_id - + # Count the number of Nexthop member pointing to tunnel if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] == tunnel_nh_id: count += 1 @@ -191,7 +193,7 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "standby") self.add_neighbor(dvs, self.SERV1_IPV4, "00:00:00:00:00:01") - # Broadcast neigh 192.168.0.255 is default added. Hence +1 for expected number + # Broadcast neigh 192.168.0.255 is default added. 
Hence +1 for expected number srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4, 2) self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01", True) @@ -334,9 +336,9 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): dvs_route.check_asicdb_deleted_route_entries([rtprefix]) ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - + fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), ("ifname", "Vlan1000,Vlan1000")]) - + ps.set(rtprefix, fvs) # Check if route was propagated to ASIC DB @@ -544,7 +546,7 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dsc assert p2p_obj != None fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, p2p_obj) - + if tc_to_dscp_map_oid: assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs if tc_to_queue_map_oid: @@ -571,6 +573,8 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dsc assert value == tc_to_dscp_map_oid elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP": assert value == tc_to_queue_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE": + assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" else: assert False, "Field %s is not tested" % field @@ -578,11 +582,11 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dsc def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" - + expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: fvs = asicdb.get_entry(self.ASIC_TUNNEL_TERM_ENTRIES, term_entry) - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs.items(): if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": @@ -601,17 +605,14 @@ def 
check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid assert False, "Field %s is not tested" % field - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): + def create_and_test_tunnel(self, configdb, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; - - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - + is_symmetric_tunnel = "src_ip" in kwargs fvs = create_fvs(**kwargs) - - ps.set(tunnel_name, fvs) + # Write into config db for muxorch, tunnelmgrd will write to APP_DB + configdb_ps = swsscommon.Table(configdb, self.CONFIG_TUNNEL_TABLE_NAME) + configdb_ps.set(tunnel_name, fvs) # wait till config will be applied time.sleep(1) @@ -681,7 +682,7 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) # Apply QoS map to config db - table = swsscommon.Table(configdb, qos_map_type_name) + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) fvs = swsscommon.FieldValuePairs(list(qos_map.items())) table.set(qos_map_name, fvs) time.sleep(1) @@ -689,13 +690,11 @@ def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) assert len(diff) == 1 oid = diff.pop() - fvs_in_asicdb = asicdb.get_entry(self.ASIC_QOS_MAP_TABLE_KEY, oid) - assert(fvs_in_asicdb["SAI_QOS_MAP_ATTR_TYPE"] == qos_map_type_name) return oid - def remove_qos_map(self, configdb, qos_map_oid): + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_oid): """ Remove the testing qos map""" - table = swsscommon.Table(configdb, qos_map_type_name) + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) table._del(qos_map_oid) def cleanup_left_over(self, 
db, asicdb): @@ -720,17 +719,17 @@ class TestMuxTunnel(TestMuxTunnelBase): def test_Tunnel(self, dvs, testlog): """ test IPv4 Mux tunnel creation """ - db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - dst_ip="10.1.0.32", dscp_mode="pipe", + self.create_and_test_tunnel(configdb, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", + src_ip="10.1.0.33", dst_ip="10.1.0.32", dscp_mode="pipe", ecn_mode="standard", ttl_mode="pipe", encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, - encap_tc_color_to_dscp_map=self.TUNNEL_QOS_MAP_NAME) + encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME) def test_Peer(self, dvs, testlog): @@ -738,14 +737,14 @@ def test_Peer(self, dvs, testlog): db = dvs.get_config_db() asicdb = dvs.get_asic_db() - - tc_to_dscp_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) - tc_to_queue_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", tc_to_dscp_map_oid, tc_to_queue_map_oid) - self.remove_qos_map(configdb, tc_to_dscp_map_oid) - self.remove_qos_map(tc_to_queue_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) def 
test_Neighbor(self, dvs, dvs_route, testlog): diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index 95f6c611cb..40137f3848 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -15,6 +15,7 @@ class TestTunnelBase(object): ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -32,8 +33,8 @@ class TestTunnelBase(object): } # Define 2 dummy maps - DSCP_TO_TC_MAP = {str(i):str{1} for i in range(0, 64)} - TC_TO_PRIORITY_GROUP_MAP = {str{i}:str{i} for i in range(0, 8)} + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} def check_interface_exists_in_asicdb(self, asicdb, sai_oid): if_table = swsscommon.Table(asicdb, self.ASIC_RIF_TABLE) @@ -52,11 +53,12 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid assert len(tunnel_term_entries) == len(dst_ips) expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: status, fvs = tunnel_term_table.get(term_entry) assert status == True - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs: if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": @@ -79,21 +81,28 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): is_symmetric_tunnel = "src_ip" in kwargs - decap_dscp_to_tc_map_oid = kwargs["decap_dscp_to_tc_map_oid"] if "decap_dscp_to_tc_map_oid" in kwargs else None - decap_tc_to_pg_map_oid = kwargs["decap_tc_to_pg_map_oid"] if "decap_tc_to_pg_map_oid" in kwargs else None + decap_dscp_to_tc_map_oid = None + decap_tc_to_pg_map_oid = None + configdb = None if "decap_dscp_to_tc_map_oid" in kwargs: - 
kwargs.pop("decap_dscp_to_tc_map_oid") + decap_dscp_to_tc_map_oid = kwargs.pop("decap_dscp_to_tc_map_oid") if "decap_tc_to_pg_map_oid" in kwargs: - kwargs.pop("decap_tc_to_pg_map_oid") - - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - + decap_tc_to_pg_map_oid = kwargs.pop("decap_tc_to_pg_map_oid") + + if "configdb" in kwargs: + configdb = kwargs.pop("configdb") + fvs = create_fvs(**kwargs) - - ps.set(tunnel_name, fvs) + if configdb: + # Write into config db for muxorch, tunnelmgrd will write to APP_DB + configdb_ps = swsscommon.Table(configdb, self.CONFIG_TUNNEL_TABLE_NAME) + configdb_ps.set(tunnel_name, fvs) + else: + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + ps.set(tunnel_name, fvs) # wait till config will be applied time.sleep(1) @@ -111,17 +120,19 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert status == True # 6 parameters to check in case of decap tunnel # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + expected_len = 7 if is_symmetric_tunnel else 6 expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] if decap_dscp_to_tc_map_oid: - assert "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP" in fvs + expected_len += 1 if decap_tc_to_pg_map_oid: - assert "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP" in fvs + expected_len += 1 + assert len(fvs) == expected_len + for field, value in fvs: if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" @@ -138,9 +149,9 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": - 
assert value == dscp_to_tc_map_oid + assert value == decap_dscp_to_tc_map_oid elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": - assert value = tc_to_pg_map_oid + assert value == decap_tc_to_pg_map_oid else: assert False, "Field %s is not tested" % field src_ip = kwargs["src_ip"] if "src_ip" in kwargs else None @@ -174,21 +185,21 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): """ Add qos map for testing""" - current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) + qos_table = swsscommon.Table(asicdb, self.ASIC_QOS_MAP_TABLE_KEY) + current_oids = qos_table.getKeys() + # Apply QoS map to config db table = swsscommon.Table(configdb, qos_map_type_name) fvs = swsscommon.FieldValuePairs(list(qos_map.items())) table.set(qos_map_name, fvs) time.sleep(1) - - diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) + + diff = set(qos_table.getKeys()) - set(current_oids) assert len(diff) == 1 oid = diff.pop() - fvs_in_asicdb = asicdb.get_entry(self.ASIC_QOS_MAP_TABLE_KEY, oid) - assert(fvs_in_asicdb["SAI_QOS_MAP_ATTR_TYPE"] == qos_map_type_name) return oid - def remove_qos_map(self, configdb, qos_map_oid): + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_oid): """ Remove the testing qos map""" table = swsscommon.Table(configdb, qos_map_type_name) table._del(qos_map_oid) @@ -261,13 +272,13 @@ def test_TunnelDecap_MuxTunnel(self, dvs, testlog): "ttl_mode": "uniform", "decap_dscp_to_tc_map": "AZURE_TUNNEL", "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, - "decap_tc_to_pg_map": "AZURE_TUNNEL" + "decap_tc_to_pg_map": "AZURE_TUNNEL", "decap_tc_to_pg_map_oid": tc_to_pg_map_oid } self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) - - self.remove_qos_map(configdb, dscp_to_tc_map_oid) - self.remove_qos_map(configdb, tc_to_pg_map_oid) + + self.remove_qos_map(configdb, 
swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) class TestSymmetricTunnel(TestTunnelBase): From 7fcfa687a3bb4e51dd1eecd4535577d8d997782e Mon Sep 17 00:00:00 2001 From: bingwang Date: Wed, 13 Apr 2022 01:43:48 +0000 Subject: [PATCH 07/10] Fix LGTM Signed-off-by: bingwang --- orchagent/tunneldecaporch.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 7fa3af0acf..62cebf0bcf 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -405,14 +405,14 @@ bool TunnelDecapOrch::addDecapTunnel( * Arguments: * @param[in] tunnelKey - key of the tunnel from APP_DB * @param[in] src_ip - source ip address of decap tunnel - * @param[in] dst_ip - destination ip addresses to decap + * @param[in] dst_ips - destination ip addresses to decap * @param[in] tunnel_id - the id of the tunnel * @param[in] term_type - P2P or P2MP. 
Other types (MP2P and MP2MP) not supported yet * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) +bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ips, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) { SWSS_LOG_ENTER(); @@ -454,7 +454,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres TunnelEntry *tunnel_info = &tunnelTable.find(tunnelKey)->second; // loop through the IP list and create a new tunnel table entry for every IP (in network byte order) - set tunnel_ips = dst_ip.getIpAddresses(); + set tunnel_ips = dst_ips.getIpAddresses(); for (auto it = tunnel_ips.begin(); it != tunnel_ips.end(); ++it) { const IpAddress& ia = *it; From 11efd68da2b3d305b3f24f6ada61d17959c71640 Mon Sep 17 00:00:00 2001 From: bingwang Date: Wed, 20 Apr 2022 13:31:02 +0000 Subject: [PATCH 08/10] Address comments Signed-off-by: bingwang --- orchagent/muxorch.cpp | 70 ++++++++++++++------------- orchagent/muxorch.h | 4 +- orchagent/orchdaemon.cpp | 2 +- orchagent/qosorch.cpp | 34 ++++++++++++- orchagent/qosorch.h | 4 +- orchagent/tunneldecaporch.cpp | 63 ++++++------------------ orchagent/tunneldecaporch.h | 4 +- tests/mock_tests/routeorch_ut.cpp | 2 +- tests/test_mux.py | 80 ++++++++++++++++++++++--------- tests/test_tunnel.py | 16 ++----- 10 files changed, 154 insertions(+), 125 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index d7dc775706..f4f435887f 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -33,6 +33,7 @@ extern RouteOrch *gRouteOrch; extern AclOrch *gAclOrch; extern PortsOrch *gPortsOrch; extern FdbOrch *gFdbOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gUnderlayIfId; @@ -1183,13 
+1184,13 @@ void MuxOrch::update(SubjectType type, void *cntx) } } -MuxOrch::MuxOrch(DBConnector *db, const std::vector &tables, +MuxOrch::MuxOrch(DBConnector *cfg_db, DBConnector *app_db, const std::vector &tables, TunnelDecapOrch* decapOrch, NeighOrch* neighOrch, FdbOrch* fdbOrch) : - Orch2(db, tables, request_), + Orch2(cfg_db, tables, request_), decap_orch_(decapOrch), neigh_orch_(neighOrch), fdb_orch_(fdbOrch), - cfgTunnelTable_(db, CFG_TUNNEL_TABLE_NAME) + app_decap_tunnel_table_(app_db, APP_TUNNEL_DECAP_TABLE_NAME) { handler_map_.insert(handler_pair(CFG_MUX_CABLE_TABLE_NAME, &MuxOrch::handleMuxCfg)); @@ -1249,38 +1250,35 @@ bool MuxOrch::handleMuxCfg(const Request& request) bool MuxOrch::resolveQosTableIds() { std::vector field_value_tuples; - if (cfgTunnelTable_.get(MUX_TUNNEL, field_value_tuples)) + if (app_decap_tunnel_table_.get(MUX_TUNNEL, field_value_tuples)) { KeyOpFieldsValuesTuple tuple{"TUNNEL", MUX_TUNNEL, field_value_tuples}; - for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) + // Read tc_to_queue_map_id + tc_to_queue_map_id_ = gQosOrch->resolveTunnelQosMap(app_decap_tunnel_table_.getTableName(), MUX_TUNNEL, encap_tc_to_queue_field_name, tuple); + if (tc_to_queue_map_id_ == SAI_NULL_OBJECT_ID) { - if (qos_to_ref_table_map.find(fvField(*it)) != qos_to_ref_table_map.end()) - { - sai_object_id_t id; - string object_name; - string &map_type_name = fvField(*it); - string &map_name = fvValue(*it); - ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); - if (status == ref_resolve_status::success) - { - if (map_type_name == encap_tc_to_queue_field_name) - { - tc_to_queue_map_id_ = id; - } - else if (map_type_name == encap_tc_to_dscp_field_name) - { - tc_to_dscp_map_id_ = id; - } - setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, MUX_TUNNEL, map_type_name, object_name); - SWSS_LOG_NOTICE("Resolved QoS 
map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str()); - } - } + SWSS_LOG_NOTICE("QoS map for tunnel %s type %s is not set", MUX_TUNNEL, encap_tc_to_queue_field_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s id %" PRId64, MUX_TUNNEL, encap_tc_to_queue_field_name.c_str(), tc_to_queue_map_id_); + } + + // Read tc_to_dscp_map_id + tc_to_dscp_map_id_ = gQosOrch->resolveTunnelQosMap(app_decap_tunnel_table_.getTableName(), MUX_TUNNEL, encap_tc_to_dscp_field_name, tuple); + if (tc_to_dscp_map_id_ == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map for tunnel %s type %s is not set", MUX_TUNNEL, encap_tc_to_dscp_field_name.c_str()); + } + else + { + SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s id %" PRId64, MUX_TUNNEL, encap_tc_to_dscp_field_name.c_str(), tc_to_dscp_map_id_); } return true; } else { - SWSS_LOG_ERROR("Failed to read config from CONFIG_DB for %s", MUX_TUNNEL); + SWSS_LOG_NOTICE("Entry for table %s not created yet in APP_DB", MUX_TUNNEL); return false; } } @@ -1306,17 +1304,23 @@ bool MuxOrch::handlePeerSwitch(const Request& request) MUX_TUNNEL, peer_ip.to_string().c_str()); return false; } + auto it = dst_ips.getIpAddresses().begin(); + const IpAddress& dst_ip = *it; + + // Read dscp_mode of MuxTunnel0 from app_db + string dscp_mode_name = "pipe"; + if (!app_decap_tunnel_table_.hget(MUX_TUNNEL, "dscp_mode", dscp_mode_name)) + { + SWSS_LOG_NOTICE("dscp_mode not available for %s", MUX_TUNNEL); + return false; + } + + // Read tc_to_dscp_map_id and tc_to_queue_map_id if (!resolveQosTableIds()) { return false; } - auto it = dst_ips.getIpAddresses().begin(); - const IpAddress& dst_ip = *it; - // Read dscp_mode of MuxTunnel0 from config_db - string dscp_mode_name = "pipe"; - cfgTunnelTable_.hget(MUX_TUNNEL, "dscp_mode", dscp_mode_name); - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_, dscp_mode_name); SWSS_LOG_NOTICE("Mux peer ip '%s' 
was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 6288bca0c6..acc9c2c200 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -156,7 +156,7 @@ class MuxCfgRequest : public Request class MuxOrch : public Orch2, public Observer, public Subject { public: - MuxOrch(DBConnector *db, const std::vector &tables, TunnelDecapOrch*, NeighOrch*, FdbOrch*); + MuxOrch(DBConnector *cfg_db, DBConnector *app_db, const std::vector &tables, TunnelDecapOrch*, NeighOrch*, FdbOrch*); using handler_pair = pair; using handler_map = map; @@ -214,7 +214,7 @@ class MuxOrch : public Orch2, public Observer, public Subject FdbOrch *fdb_orch_; MuxCfgRequest request_; - Table cfgTunnelTable_; + Table app_decap_tunnel_table_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 0f4436ddf8..851bea6869 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -303,7 +303,7 @@ bool OrchDaemon::init() CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_configDb, mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + MuxOrch *mux_orch = new MuxOrch(m_configDb, m_applDb, mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); MuxCableOrch *mux_cb_orch = new MuxCableOrch(m_applDb, m_stateDb, APP_MUX_CABLE_TABLE_NAME); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index d2c3d4992b..24814c3710 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -80,7 +80,7 @@ type_map QosOrch::m_qos_maps = { {CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, {CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_TUNNEL_TABLE_NAME, new object_reference_map()} + {APP_TUNNEL_DECAP_TABLE_NAME, new object_reference_map()} }; map qos_to_ref_table_map = { @@ -1949,3 
+1949,35 @@ void QosOrch::doTask(Consumer &consumer) } } } + +/** + * Function Description: + * @brief Resolve the id of QoS map that is referenced by tunnel + * + * Arguments: + * @param[in] referencing_table_name - The name of table that is referencing the QoS map + * @param[in] tunnel_name - The name of tunnel + * @param[in] map_type_name - The type of referenced QoS map + * @param[in] tuple - The KeyOpFieldsValuesTuple that contains keys - values + * + * Return Values: + * @return The sai_object_id of referenced map, or SAI_NULL_OBJECT_ID if there's an error + */ +sai_object_id_t QosOrch::resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple) +{ + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + if (status == ref_resolve_status::success) + { + + setObjectReference(m_qos_maps, referencing_table_name, tunnel_name, map_type_name, object_name); + SWSS_LOG_INFO("Resolved QoS map for table %s tunnel %s type %s name %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str(), object_name.c_str()); + return id; + } + else + { + SWSS_LOG_ERROR("Failed to resolve QoS map for table %s tunnel %s type %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str()); + return SAI_NULL_OBJECT_ID; + } +} diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 05b429432e..d0429f119d 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -60,8 +60,6 @@ const string ecn_green_red = "ecn_green_red"; const string ecn_green_yellow = "ecn_green_yellow"; const string ecn_all = "ecn_all"; -// Declaration for being referenced in muxorch and decaporch -extern std::map qos_to_ref_table_map; class QosMapHandler { public: @@ -168,6 +166,8 @@ class QosOrch : public Orch static type_map& getTypeMap(); static type_map 
m_qos_maps; + + sai_object_id_t resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple); private: void doTask() override; virtual void doTask(Consumer& consumer); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 62cebf0bcf..d176a75b90 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -18,6 +18,7 @@ extern sai_object_id_t gUnderlayIfId; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern CrmOrch* gCrmOrch; +extern QosOrch* gQosOrch; TunnelDecapOrch::TunnelDecapOrch(DBConnector *db, string tableName) : Orch(db, tableName) { @@ -32,7 +33,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { return; } - + string table_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -41,8 +42,8 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string key = kfvKey(t); string op = kfvOp(t); - IpAddresses dst_ip_addresses; - IpAddress src_ip_address("0.0.0.0"); + IpAddresses ip_addresses; + IpAddress src_ip; IpAddress* p_src_ip = nullptr; string tunnel_type; string dscp_mode; @@ -51,7 +52,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ttl_mode; sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; - TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; bool valid = true; @@ -83,7 +83,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { try { - dst_ip_addresses = IpAddresses(fvValue(i)); + ip_addresses = IpAddresses(fvValue(i)); } catch (const std::invalid_argument &e) { @@ -93,17 +93,15 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setIpAttribute(key, dst_ip_addresses, tunnelTable.find(key)->second.tunnel_id); + setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id); } } else if (fvField(i) == "src_ip") { try { - src_ip_address = 
IpAddress(fvValue(i)); - p_src_ip = &src_ip_address; - //Tunnel term type is set to P2P when source ip is present - term_type = TUNNEL_TERM_TYPE_P2P; + src_ip = IpAddress(fvValue(i)); + p_src_ip = &src_ip; } catch (const std::invalid_argument &e) { @@ -174,7 +172,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } else if (fvField(i) == decap_dscp_to_tc_field_name) { - dscp_to_dc_map_id = resolveQosMapId(key, decap_dscp_to_tc_field_name, t); + dscp_to_dc_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_dscp_to_tc_field_name, t); if (exists && dscp_to_dc_map_id != SAI_NULL_OBJECT_ID) { setTunnelAttribute(fvField(i), dscp_to_dc_map_id, tunnel_id); @@ -182,7 +180,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } else if (fvField(i) == decap_tc_to_pg_field_name) { - tc_to_pg_map_id = resolveQosMapId(key, decap_tc_to_pg_field_name, t); + tc_to_pg_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_tc_to_pg_field_name, t); if (exists && tc_to_pg_map_id != SAI_NULL_OBJECT_ID) { setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); @@ -194,8 +192,8 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (valid && !exists) { - if (addDecapTunnel(key, tunnel_type, dst_ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, - term_type, dscp_to_dc_map_id, tc_to_pg_map_id)) + if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, + dscp_to_dc_map_id, tc_to_pg_map_id)) { SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } @@ -233,7 +231,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) * @param[in] dscp - dscp mode (uniform/pipe) * @param[in] ecn - ecn mode (copy_from_outer/standard) * @param[in] ttl - ttl mode (uniform/pipe) - * @param[in] term_type - The type of tunnel term * @param[in] dscp_to_tc_map_id - Map ID for remapping DSCP to TC (decap) * @param[in] tc_to_pg_map_id - Map ID for remapping TC to PG (decap) * @@ -249,7 +246,6 @@ bool 
TunnelDecapOrch::addDecapTunnel( string ecn, string encap_ecn, string ttl, - TunnelTermType term_type, sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id) { @@ -262,6 +258,7 @@ bool TunnelDecapOrch::addDecapTunnel( sai_attribute_t attr; vector tunnel_attrs; sai_object_id_t overlayIfId; + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; // create the overlay router interface to create a LOOPBACK type router interface (decap) vector overlay_intf_attrs; @@ -310,6 +307,7 @@ bool TunnelDecapOrch::addDecapTunnel( copy(attr.value.ipaddr, p_src_ip->to_string()); tunnel_attrs.push_back(attr); src_ip = *p_src_ip; + term_type = TUNNEL_TERM_TYPE_P2P; } // decap ecn mode (copy from outer/standard) @@ -474,7 +472,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres // check if the there's an entry already for the key pair if (existingIps.find(key) != existingIps.end()) { - SWSS_LOG_ERROR("%s already exists. Did not create entry.", key.c_str()); + SWSS_LOG_NOTICE("%s already exists. 
Did not create entry.", key.c_str()); } else { @@ -941,34 +939,3 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) return tunnelTable[tunnelKey].dst_ip_addrs; } -/** - * Function Description: - * @brief Resolve the map id from QosOrch - * - * Arguments: - * @param[in] tunnle_name - The name of tunnel - * @param[in] map_type_name - The type of referenced QoS map - * @param[in] tuple - The KeyOpFieldsValuesTuple that contains keys - values - * - * Return Values: - * @return The sai_object_id of referenced map, or SAI_NULL_OBJECT_ID if there's an error - */ -sai_object_id_t TunnelDecapOrch::resolveQosMapId(std::string tunnle_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple) -{ - sai_object_id_t id; - string object_name; - ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); - if (status == ref_resolve_status::success) - { - - setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, tunnle_name, map_type_name, object_name); - SWSS_LOG_INFO("Resolved QoS map for tunnel %s type %s name %s", tunnle_name.c_str(), map_type_name.c_str(), object_name.c_str()); - return id; - } - else - { - SWSS_LOG_ERROR("Failed to resolce QoS map for tunnel %s type %s", tunnle_name.c_str(), map_type_name.c_str()); - return SAI_NULL_OBJECT_ID; - } - -} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index d286bfc0d7..8338dda3b6 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -72,7 +72,7 @@ class TunnelDecapOrch : public Orch TunnelNhs tunnelNhs; bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, - std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, TunnelTermType term_type, + std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id); 
bool removeDecapTunnel(std::string key); @@ -88,7 +88,5 @@ class TunnelDecapOrch : public Orch int decNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); void doTask(Consumer& consumer); - - sai_object_id_t resolveQosMapId(std::string tunnle_name, std::string map_type_name, swss::KeyOpFieldsValuesTuple& tuple); }; #endif diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 84f92a088c..9b3dad849e 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -212,7 +212,7 @@ namespace routeorch_test CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), m_app_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); ASSERT_EQ(gFgNhgOrch, nullptr); diff --git a/tests/test_mux.py b/tests/test_mux.py index dfe160d287..8ba4932ea9 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -51,6 +51,8 @@ class TestMuxTunnelBase(object): TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} def create_vlan_interface(self, confdb, asicdb, dvs): @@ -605,14 +607,29 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid assert False, "Field %s is not tested" % field - def create_and_test_tunnel(self, configdb, asicdb, tunnel_name, **kwargs): + def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ is_symmetric_tunnel = "src_ip" in kwargs + + # 6 parameters to check in case of decap tunnel + # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel + expected_len = 7 if is_symmetric_tunnel else 6 + + if 'decap_tc_to_pg_map_id' 
in kwargs: + expected_len += 1 + decap_tc_to_pg_map_id = kwargs.pop('decap_tc_to_pg_map_id') + + if 'decap_dscp_to_tc_map_id' in kwargs: + expected_len += 1 + decap_dscp_to_tc_map_id = kwargs.pop('decap_dscp_to_tc_map_id') + + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + fvs = create_fvs(**kwargs) - # Write into config db for muxorch, tunnelmgrd will write to APP_DB - configdb_ps = swsscommon.Table(configdb, self.CONFIG_TUNNEL_TABLE_NAME) - configdb_ps.set(tunnel_name, fvs) + + ps.set(tunnel_name, fvs) # wait till config will be applied time.sleep(1) @@ -624,14 +641,13 @@ def create_and_test_tunnel(self, configdb, asicdb, tunnel_name, **kwargs): fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, tunnel_sai_obj) - # 6 parameters to check in case of decap tunnel - # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + assert len(fvs) == expected_len expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" @@ -647,6 +663,10 @@ def create_and_test_tunnel(self, configdb, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == decap_dscp_to_tc_map_id + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value == decap_tc_to_pg_map_id else: assert False, "Field %s is not tested" % field src_ip = kwargs['src_ip'] if 'src_ip' in kwargs else None @@ -715,37 +735,53 @@ def cleanup_left_over(self, db, asicdb): class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel 
creation and removal """ + @pytest.fixture(scope='class') + def setup(self, dvs): + db = dvs.get_config_db() + asicdb = dvs.get_asic_db() + + tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + dscp_to_tc_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + yield tc_to_dscp_map_oid, tc_to_queue_map_oid, dscp_to_tc_map_oid, tc_to_pg_map_oid + + self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) + self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) - def test_Tunnel(self, dvs, testlog): - """ test IPv4 Mux tunnel creation """ - configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + def test_Tunnel(self, dvs, testlog, setup): + """ test IPv4 Mux tunnel creation """ + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) - + _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup # create tunnel IPv4 tunnel - self.create_and_test_tunnel(configdb, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", src_ip="10.1.0.33", dst_ip="10.1.0.32", dscp_mode="pipe", ecn_mode="standard", ttl_mode="pipe", encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, - 
encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME) + encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME, + decap_dscp_to_tc_map=self.TUNNEL_QOS_MAP_NAME, + decap_dscp_to_tc_map_id = dscp_to_tc_map_oid, + decap_tc_to_pg_map=self.TUNNEL_QOS_MAP_NAME, + decap_tc_to_pg_map_id=tc_to_pg_map_oid) - def test_Peer(self, dvs, testlog): + def test_Peer(self, dvs, testlog, setup): """ test IPv4 Mux tunnel creation """ db = dvs.get_config_db() asicdb = dvs.get_asic_db() + + encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup - tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) - tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) - - self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", tc_to_dscp_map_oid, tc_to_queue_map_oid) - - self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) - self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) - + self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) def test_Neighbor(self, dvs, dvs_route, testlog): """ test Neighbor entries and mux state change """ diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index 40137f3848..8c4f8d7408 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -83,7 +83,6 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): decap_dscp_to_tc_map_oid = None decap_tc_to_pg_map_oid = None - configdb = None if "decap_dscp_to_tc_map_oid" in kwargs: decap_dscp_to_tc_map_oid = kwargs.pop("decap_dscp_to_tc_map_oid") @@ -91,18 +90,11 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): if "decap_tc_to_pg_map_oid" in kwargs: decap_tc_to_pg_map_oid = kwargs.pop("decap_tc_to_pg_map_oid") - if "configdb" in kwargs: - configdb = 
kwargs.pop("configdb") - fvs = create_fvs(**kwargs) - if configdb: - # Write into config db for muxorch, tunnelmgrd will write to APP_DB - configdb_ps = swsscommon.Table(configdb, self.CONFIG_TUNNEL_TABLE_NAME) - configdb_ps.set(tunnel_name, fvs) - else: - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - ps.set(tunnel_name, fvs) + + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + ps.set(tunnel_name, fvs) # wait till config will be applied time.sleep(1) From a82a593486a7f9bca97b6a57fe6b6aef2065b144 Mon Sep 17 00:00:00 2001 From: bingwang Date: Fri, 22 Apr 2022 11:29:56 +0000 Subject: [PATCH 09/10] Remove app_db from muxorch Signed-off-by: bingwang --- orchagent/muxorch.cpp | 69 +++++++++---------------------- orchagent/muxorch.h | 7 +--- orchagent/orchdaemon.cpp | 2 +- orchagent/tunneldecaporch.cpp | 66 +++++++++++++++++++++++++++-- orchagent/tunneldecaporch.h | 14 ++++--- tests/mock_tests/routeorch_ut.cpp | 2 +- 6 files changed, 93 insertions(+), 67 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index f4f435887f..32a7037c29 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -1184,14 +1184,12 @@ void MuxOrch::update(SubjectType type, void *cntx) } } -MuxOrch::MuxOrch(DBConnector *cfg_db, DBConnector *app_db, const std::vector &tables, +MuxOrch::MuxOrch(DBConnector *db, const std::vector &tables, TunnelDecapOrch* decapOrch, NeighOrch* neighOrch, FdbOrch* fdbOrch) : - Orch2(cfg_db, tables, request_), + Orch2(db, tables, request_), decap_orch_(decapOrch), neigh_orch_(neighOrch), - fdb_orch_(fdbOrch), - app_decap_tunnel_table_(app_db, APP_TUNNEL_DECAP_TABLE_NAME) - + fdb_orch_(fdbOrch) { handler_map_.insert(handler_pair(CFG_MUX_CABLE_TABLE_NAME, &MuxOrch::handleMuxCfg)); handler_map_.insert(handler_pair(CFG_PEER_SWITCH_TABLE_NAME, &MuxOrch::handlePeerSwitch)); @@ -1245,44 +1243,6 @@ bool MuxOrch::handleMuxCfg(const 
Request& request) return true; } -// Retrieve tc_to_queue_map and tc_to_dscp_map from CONFIG_DB, and -// resolve the ids from QosOrch -bool MuxOrch::resolveQosTableIds() -{ - std::vector field_value_tuples; - if (app_decap_tunnel_table_.get(MUX_TUNNEL, field_value_tuples)) - { - KeyOpFieldsValuesTuple tuple{"TUNNEL", MUX_TUNNEL, field_value_tuples}; - // Read tc_to_queue_map_id - tc_to_queue_map_id_ = gQosOrch->resolveTunnelQosMap(app_decap_tunnel_table_.getTableName(), MUX_TUNNEL, encap_tc_to_queue_field_name, tuple); - if (tc_to_queue_map_id_ == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_NOTICE("QoS map for tunnel %s type %s is not set", MUX_TUNNEL, encap_tc_to_queue_field_name.c_str()); - } - else - { - SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s id %" PRId64, MUX_TUNNEL, encap_tc_to_queue_field_name.c_str(), tc_to_queue_map_id_); - } - - // Read tc_to_dscp_map_id - tc_to_dscp_map_id_ = gQosOrch->resolveTunnelQosMap(app_decap_tunnel_table_.getTableName(), MUX_TUNNEL, encap_tc_to_dscp_field_name, tuple); - if (tc_to_dscp_map_id_ == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_NOTICE("QoS map for tunnel %s type %s is not set", MUX_TUNNEL, encap_tc_to_dscp_field_name.c_str()); - } - else - { - SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s id %" PRId64, MUX_TUNNEL, encap_tc_to_dscp_field_name.c_str(), tc_to_dscp_map_id_); - } - return true; - } - else - { - SWSS_LOG_NOTICE("Entry for table %s not created yet in APP_DB", MUX_TUNNEL); - return false; - } -} - bool MuxOrch::handlePeerSwitch(const Request& request) { SWSS_LOG_ENTER(); @@ -1307,21 +1267,30 @@ bool MuxOrch::handlePeerSwitch(const Request& request) auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - // Read dscp_mode of MuxTunnel0 from app_db - string dscp_mode_name = "pipe"; - if (!app_decap_tunnel_table_.hget(MUX_TUNNEL, "dscp_mode", dscp_mode_name)) + // Read dscp_mode of MuxTunnel0 from decap_orch + string dscp_mode_name = decap_orch_->getDscpMode(MUX_TUNNEL); + if 
(dscp_mode_name == "") { - SWSS_LOG_NOTICE("dscp_mode not available for %s", MUX_TUNNEL); + SWSS_LOG_INFO("dscp_mode for tunnel %s is not available yet", MUX_TUNNEL); return false; } - // Read tc_to_dscp_map_id and tc_to_queue_map_id - if (!resolveQosTableIds()) + // Read tc_to_dscp_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + if (!decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_dscp_field_name, tc_to_dscp_map_id)) + { + SWSS_LOG_INFO("tc_to_dscp_map_id for tunnel %s is not available yet", MUX_TUNNEL); + return false; + } + // Read tc_to_queue_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + if (!decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_queue_field_name, tc_to_queue_map_id)) { + SWSS_LOG_INFO("tc_to_queue_map_id for tunnel %s is not available yet", MUX_TUNNEL); return false; } - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_, dscp_mode_name); + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id, dscp_mode_name); SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index acc9c2c200..6e4f70408c 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -156,7 +156,7 @@ class MuxCfgRequest : public Request class MuxOrch : public Orch2, public Observer, public Subject { public: - MuxOrch(DBConnector *cfg_db, DBConnector *app_db, const std::vector &tables, TunnelDecapOrch*, NeighOrch*, FdbOrch*); + MuxOrch(DBConnector *db, const std::vector &tables, TunnelDecapOrch*, NeighOrch*, FdbOrch*); using handler_pair = pair; using handler_map = map; @@ -196,12 +196,8 @@ class MuxOrch : public Orch2, public Observer, public Subject bool getMuxPort(const MacAddress&, const string&, string&); - bool resolveQosTableIds(); - IpAddress mux_peer_switch_ = 0x0; sai_object_id_t 
mux_tunnel_id_ = SAI_NULL_OBJECT_ID; - sai_object_id_t tc_to_queue_map_id_ = SAI_NULL_OBJECT_ID; - sai_object_id_t tc_to_dscp_map_id_ = SAI_NULL_OBJECT_ID; MuxCableTb mux_cable_tb_; MuxTunnelNHs mux_tunnel_nh_; @@ -214,7 +210,6 @@ class MuxOrch : public Orch2, public Observer, public Subject FdbOrch *fdb_orch_; MuxCfgRequest request_; - Table app_decap_tunnel_table_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 851bea6869..0f4436ddf8 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -303,7 +303,7 @@ bool OrchDaemon::init() CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_configDb, m_applDb, mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + MuxOrch *mux_orch = new MuxOrch(m_configDb, mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); MuxCableOrch *mux_cb_orch = new MuxCableOrch(m_applDb, m_stateDb, APP_MUX_CABLE_TABLE_NAME); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index d176a75b90..ed3820bde6 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -52,6 +52,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ttl_mode; sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; + // The tc_to_dscp_map_id and tc_to_queue_map_id are parsed here for muxorch to retrieve + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; bool valid = true; @@ -126,6 +129,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (exists) { setTunnelAttribute(fvField(i), dscp_mode, tunnel_id); + tunnelTable[key].dscp_mode = dscp_mode; } } else if (fvField(i) == "ecn_mode") @@ -186,6 +190,24 @@ void TunnelDecapOrch::doTask(Consumer& consumer) setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); } } + else if 
(fvField(i) == encap_tc_to_dscp_field_name) + { + tc_to_dscp_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_dscp_field_name, t); + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + } + } + else if (fvField(i) == encap_tc_to_queue_field_name) + { + tc_to_queue_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_queue_field_name, t); + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; + } + } } //create new tunnel if it doesn't exists already @@ -195,6 +217,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, dscp_to_dc_map_id, tc_to_pg_map_id)) { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } else @@ -385,7 +410,7 @@ bool TunnelDecapOrch::addDecapTunnel( } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {} }; + tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, dscp, SAI_NULL_OBJECT_ID, SAI_NULL_OBJECT_ID }; // create a decap tunnel entry for every source_ip - dest_ip pair if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) @@ -609,15 +634,14 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sa sai_attribute_t attr; - if (field == "decap_dscp_to_tc_map") + if (field == decap_dscp_to_tc_field_name) { // TC remapping. 
attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; attr.value.oid = value; } - - if (field == "decap_tc_to_pg_map") + else if (field == decap_tc_to_pg_field_name) { // TC to PG remapping attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; @@ -939,3 +963,37 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) return tunnelTable[tunnelKey].dst_ip_addrs; } +std::string TunnelDecapOrch::getDscpMode(const std::string &tunnelKey) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return ""; + } + return iter->second.dscp_mode; +} + +bool TunnelDecapOrch::getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return false; + } + if (qos_table_type == encap_tc_to_dscp_field_name) + { + oid = iter->second.encap_tc_to_dscp_map_id; + } + else if (qos_table_type == encap_tc_to_queue_field_name) + { + oid = iter->second.encap_tc_to_queue_map_id; + } + else + { + SWSS_LOG_ERROR("Unsupported qos type %s", qos_table_type.c_str()); + return false; + } + return true; +} \ No newline at end of file diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 8338dda3b6..04d7928e37 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -30,10 +30,13 @@ struct TunnelTermEntry struct TunnelEntry { - sai_object_id_t tunnel_id; // tunnel id - sai_object_id_t overlay_intf_id; // overlay interface id - swss::IpAddresses dst_ip_addrs; // destination ip addresses - std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + sai_object_id_t tunnel_id; // tunnel id + sai_object_id_t overlay_intf_id; // overlay interface id + 
swss::IpAddresses dst_ip_addrs; // destination ip addresses + std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel and ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + std::string dscp_mode; // dscp_mode, will be used in muxorch + sai_object_id_t encap_tc_to_dscp_map_id; // TC_TO_DSCP map id, will be used in muxorch + sai_object_id_t encap_tc_to_queue_map_id; // TC_TO_QUEUE map id, will be used in muxorch }; struct NexthopTunnel @@ -65,7 +68,8 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); swss::IpAddresses getDstIpAddresses(std::string tunnelKey); - + std::string getDscpMode(const std::string &tunnelKey) const; + bool getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const; private: TunnelTable tunnelTable; ExistingIps existingIps; diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 9b3dad849e..84f92a088c 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -212,7 +212,7 @@ namespace routeorch_test CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), m_app_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); ASSERT_EQ(gFgNhgOrch, nullptr); From 151255d8fe04605b2bee6b5a3da8fa7dc83784fb Mon Sep 17 00:00:00 2001 From: bingwang Date: Tue, 26 Apr 2022 06:22:18 +0000 Subject: [PATCH 10/10] Update muxorch to ignore non-existing attributes Signed-off-by: bingwang --- orchagent/muxorch.cpp | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 
32a7037c29..fb45e0132d 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -212,18 +212,21 @@ static sai_object_id_t create_tunnel( attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); - sai_tunnel_dscp_mode_t dscp_mode; - if (dscp_mode_name == "uniform") + if (dscp_mode_name == "uniform" || dscp_mode_name == "pipe") { - dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; - } - else - { - dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + sai_tunnel_dscp_mode_t dscp_mode; + if (dscp_mode_name == "uniform") + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; + } + else + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + } + attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); } - attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; - attr.value.s32 = dscp_mode; - tunnel_attrs.push_back(attr); attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_DROP; @@ -1271,23 +1274,22 @@ bool MuxOrch::handlePeerSwitch(const Request& request) string dscp_mode_name = decap_orch_->getDscpMode(MUX_TUNNEL); if (dscp_mode_name == "") { - SWSS_LOG_INFO("dscp_mode for tunnel %s is not available yet", MUX_TUNNEL); - return false; + SWSS_LOG_NOTICE("dscp_mode for tunnel %s is not available. Will not be applied", MUX_TUNNEL); } // Read tc_to_dscp_map_id of MuxTunnel0 from decap_orch sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; - if (!decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_dscp_field_name, tc_to_dscp_map_id)) + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_dscp_field_name, tc_to_dscp_map_id); + if (tc_to_dscp_map_id == SAI_NULL_OBJECT_ID) { - SWSS_LOG_INFO("tc_to_dscp_map_id for tunnel %s is not available yet", MUX_TUNNEL); - return false; + SWSS_LOG_NOTICE("tc_to_dscp_map_id for tunnel %s is not available. 
Will not be applied", MUX_TUNNEL); } // Read tc_to_queue_map_id of MuxTunnel0 from decap_orch sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; - if (!decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_queue_field_name, tc_to_queue_map_id)) + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_queue_field_name, tc_to_queue_map_id); + if (tc_to_queue_map_id == SAI_NULL_OBJECT_ID) { - SWSS_LOG_INFO("tc_to_queue_map_id for tunnel %s is not available yet", MUX_TUNNEL); - return false; + SWSS_LOG_NOTICE("tc_to_queue_map_id for tunnel %s is not available. Will not be applied", MUX_TUNNEL); } mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id, dscp_mode_name);