diff --git a/configure.ac b/configure.ac index 231f1e1c58..6edc02da91 100644 --- a/configure.ac +++ b/configure.ac @@ -136,7 +136,7 @@ if test "x$asan_enabled" = "xtrue"; then CFLAGS_ASAN+=" -Wno-maybe-uninitialized" AC_SUBST(CFLAGS_ASAN) - LDFLAGS_ASAN+=" -lasan" + LDFLAGS_ASAN+=" -fsanitize=address" AC_SUBST(LDFLAGS_ASAN) fi diff --git a/fdbsyncd/fdbsyncd.cpp b/fdbsyncd/fdbsyncd.cpp index 4f9405cbfd..70bff7b79f 100644 --- a/fdbsyncd/fdbsyncd.cpp +++ b/fdbsyncd/fdbsyncd.cpp @@ -16,10 +16,10 @@ int main(int argc, char **argv) { Logger::linkToDbNative("fdbsyncd"); - DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector appDb("APPL_DB", 0); RedisPipeline pipelineAppDB(&appDb); - DBConnector stateDb(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector config_db(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector stateDb("STATE_DB", 0); + DBConnector config_db("CONFIG_DB", 0); FdbSync sync(&pipelineAppDB, &stateDb, &config_db); diff --git a/gearsyncd/gearparserbase.cpp b/gearsyncd/gearparserbase.cpp index c6cae36253..e86e34e10c 100644 --- a/gearsyncd/gearparserbase.cpp +++ b/gearsyncd/gearparserbase.cpp @@ -24,7 +24,7 @@ GearParserBase::init() { m_writeToDb = false; m_rootInit = false; - m_applDb = std::unique_ptr{new swss::DBConnector(APPL_DB, swss::DBConnector::DEFAULT_UNIXSOCKET, 0)}; + m_applDb = std::unique_ptr{new swss::DBConnector("APPL_DB", 0)}; m_producerStateTable = std::unique_ptr{new swss::ProducerStateTable(m_applDb.get(), APP_GEARBOX_TABLE_NAME)}; } diff --git a/gearsyncd/gearsyncd.cpp b/gearsyncd/gearsyncd.cpp index f79b079d82..55b6eca0a3 100644 --- a/gearsyncd/gearsyncd.cpp +++ b/gearsyncd/gearsyncd.cpp @@ -74,8 +74,8 @@ int main(int argc, char **argv) } } - DBConnector cfgDb(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector applDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector applDb("APPL_DB", 0); ProducerStateTable producerStateTable(&applDb, 
APP_GEARBOX_TABLE_NAME); WarmStart::initialize("gearsyncd", "swss"); diff --git a/orchagent/dash/dashorch.cpp b/orchagent/dash/dashorch.cpp index f4538797b2..0e2db37007 100644 --- a/orchagent/dash/dashorch.cpp +++ b/orchagent/dash/dashorch.cpp @@ -18,6 +18,7 @@ #include "crmorch.h" #include "saihelper.h" #include "directory.h" +#include "flex_counter_manager.h" #include "taskworker.h" #include "pbutils.h" @@ -28,16 +29,45 @@ using namespace swss; extern Directory gDirectory; extern std::unordered_map gVnetNameToId; +extern sai_dash_appliance_api_t* sai_dash_appliance_api; extern sai_dash_vip_api_t* sai_dash_vip_api; extern sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; extern sai_dash_eni_api_t* sai_dash_eni_api; extern sai_object_id_t gSwitchId; extern size_t gMaxBulkSize; extern CrmOrch *gCrmOrch; +extern bool gTraditionalFlexCounter; -DashOrch::DashOrch(DBConnector *db, vector &tableName, ZmqServer *zmqServer) : ZmqOrch(db, tableName, zmqServer) +#define FLEX_COUNTER_UPD_INTERVAL 1 + +DashOrch::DashOrch(DBConnector *db, vector &tableName, ZmqServer *zmqServer) : + ZmqOrch(db, tableName, zmqServer), + m_eni_stat_manager(ENI_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, ENI_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false) { SWSS_LOG_ENTER(); + + m_asic_db = std::shared_ptr(new DBConnector("ASIC_DB", 0)); + m_counter_db = std::shared_ptr(new DBConnector("COUNTERS_DB", 0)); + m_eni_name_table = std::unique_ptr(new Table(m_counter_db.get(), COUNTERS_ENI_NAME_MAP)); + + if (gTraditionalFlexCounter) + { + m_vid_to_rid_table = std::make_unique
(m_asic_db.get(), "VIDTORID"); + } + + auto intervT = timespec { .tv_sec = FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; + m_fc_update_timer = new SelectableTimer(intervT); + auto executorT = new ExecutableTimer(m_fc_update_timer, this, "FLEX_COUNTER_UPD_TIMER"); + Orch::addExecutor(executorT); + + /* Fetch the available counter Ids */ + m_counter_stats.clear(); + auto stat_enum_list = queryAvailableCounterStats((sai_object_type_t)SAI_OBJECT_TYPE_ENI); + for (auto &stat_enum: stat_enum_list) + { + auto counter_id = static_cast(stat_enum); + m_counter_stats.insert(sai_serialize_eni_stat(counter_id)); + } } bool DashOrch::getRouteTypeActions(dash::route_type::RoutingType routing_type, dash::route_type::RouteType& route_type) @@ -66,15 +96,31 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian } uint32_t attr_count = 1; + sai_attribute_t appliance_attr; + sai_status_t status; + + // NOTE: DASH Appliance object should be the first object pushed to SAI + sai_object_id_t sai_appliance_id = 0UL; + appliance_attr.id = SAI_DASH_APPLIANCE_ATTR_LOCAL_REGION_ID; + appliance_attr.value.u32 = entry.local_region_id(); + status = sai_dash_appliance_api->create_dash_appliance(&sai_appliance_id, gSwitchId, + attr_count, &appliance_attr); + if (status != SAI_STATUS_SUCCESS && status != SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_ERROR("Failed to create dash appliance object in SAI for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_APPLIANCE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + sai_vip_entry_t vip_entry; vip_entry.switch_id = gSwitchId; if (!to_sai(entry.sip(), vip_entry.vip)) { return false; } - sai_attribute_t appliance_attr; - vector appliance_attrs; - sai_status_t status; appliance_attr.id = SAI_VIP_ENTRY_ATTR_ACTION; appliance_attr.value.u32 = SAI_VIP_ENTRY_ACTION_ACCEPT; status = 
sai_dash_vip_api->create_vip_entry(&vip_entry, attr_count, &appliance_attr); @@ -103,8 +149,8 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian return parseHandleSaiStatusFailure(handle_status); } } - appliance_entries_[appliance_id] = entry; - SWSS_LOG_NOTICE("Created vip and direction lookup entries for %s", appliance_id.c_str()); + appliance_entries_[appliance_id] = ApplianceEntry { sai_appliance_id, entry }; + SWSS_LOG_NOTICE("Created appliance, vip and direction lookup entries for %s", appliance_id.c_str()); return true; } @@ -114,7 +160,6 @@ bool DashOrch::removeApplianceEntry(const string& appliance_id) SWSS_LOG_ENTER(); sai_status_t status; - dash::appliance::Appliance entry; if (appliance_entries_.find(appliance_id) == appliance_entries_.end()) { @@ -122,7 +167,7 @@ bool DashOrch::removeApplianceEntry(const string& appliance_id) return true; } - entry = appliance_entries_[appliance_id]; + const auto& entry = appliance_entries_[appliance_id].metadata; sai_vip_entry_t vip_entry; vip_entry.switch_id = gSwitchId; if (!to_sai(entry.sip(), vip_entry.vip)) @@ -153,8 +198,23 @@ bool DashOrch::removeApplianceEntry(const string& appliance_id) return parseHandleSaiStatusFailure(handle_status); } } + + auto sai_appliance_id = appliance_entries_[appliance_id].appliance_id; + if (sai_appliance_id != 0UL) + { + status = sai_dash_appliance_api->remove_dash_appliance(sai_appliance_id); + if (status != SAI_STATUS_SUCCESS && status != SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_ERROR("Failed to remove dash appliance object in SAI for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_APPLIANCE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } appliance_entries_.erase(appliance_id); - SWSS_LOG_NOTICE("Removed vip and direction lookup entries for %s", appliance_id.c_str()); + SWSS_LOG_NOTICE("Removed appliance, vip and 
direction lookup entries for %s", appliance_id.c_str()); return true; } @@ -383,7 +443,7 @@ bool DashOrch::addEniObject(const string& eni, EniEntry& entry) eni_attrs.push_back(eni_attr); eni_attr.id = SAI_ENI_ATTR_VM_VNI; - auto app_entry = appliance_entries_.begin()->second; + auto& app_entry = appliance_entries_.begin()->second.metadata; eni_attr.value.u32 = app_entry.vm_vni(); eni_attrs.push_back(eni_attr); @@ -417,6 +477,8 @@ bool DashOrch::addEniObject(const string& eni, EniEntry& entry) } } + addEniToFC(eni_id, eni); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI); SWSS_LOG_NOTICE("Created ENI object for %s", eni.c_str()); @@ -499,6 +561,9 @@ bool DashOrch::removeEniObject(const string& eni) SWSS_LOG_ENTER(); EniEntry entry = eni_entries_[eni]; + + removeEniFromFC(entry.eni_id, eni); + sai_status_t status = sai_dash_eni_api->remove_eni(entry.eni_id); if (status != SAI_STATUS_SUCCESS) { @@ -881,3 +946,93 @@ void DashOrch::doTask(ConsumerBase& consumer) SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); } } + +void DashOrch::removeEniFromFC(sai_object_id_t oid, const string &name) +{ + SWSS_LOG_ENTER(); + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Cannot remove counter on NULL OID for eni %s", name.c_str()); + return; + } + + if (m_eni_stat_work_queue.find(oid) != m_eni_stat_work_queue.end()) + { + m_eni_stat_work_queue.erase(oid); + return; + } + + m_eni_name_table->hdel("", name); + m_eni_stat_manager.clearCounterIdList(oid); + SWSS_LOG_DEBUG("Unregistered eni %s to Flex counter", name.c_str()); +} + +void DashOrch::clearEniFCStats() +{ + for (auto it = eni_entries_.begin(); it != eni_entries_.end(); it++) + { + removeEniFromFC(it->second.eni_id, it->first); + } +} + +void DashOrch::handleFCStatusUpdate(bool enabled) +{ + if (!enabled && m_eni_fc_status) + { + m_fc_update_timer->stop(); + clearEniFCStats(); + } + else if (enabled && !m_eni_fc_status) + { + m_fc_update_timer->start(); + } + m_eni_fc_status = enabled; +} + +void 
DashOrch::addEniToFC(sai_object_id_t oid, const string &name) +{ + auto was_empty = m_eni_stat_work_queue.empty(); + m_eni_stat_work_queue[oid] = name; + if (was_empty) + { + m_fc_update_timer->start(); + } +} + +void DashOrch::doTask(SelectableTimer &timer) +{ + SWSS_LOG_ENTER(); + + if (!m_eni_fc_status) + { + m_fc_update_timer->stop(); + return ; + } + + for (auto it = m_eni_stat_work_queue.begin(); it != m_eni_stat_work_queue.end(); ) + { + string value; + const auto id = sai_serialize_object_id(it->first); + + if (!gTraditionalFlexCounter || m_vid_to_rid_table->hget("", id, value)) + { + SWSS_LOG_INFO("Registering %s, id %s", it->second.c_str(), id.c_str()); + std::vector eniNameFvs; + eniNameFvs.emplace_back(it->second, id); + m_eni_name_table->set("", eniNameFvs); + + m_eni_stat_manager.setCounterIdList(it->first, CounterType::ENI, m_counter_stats); + it = m_eni_stat_work_queue.erase(it); + } + else + { + ++it; + } + } + + if (m_eni_stat_work_queue.empty()) + { + m_fc_update_timer->stop(); + } +} diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h index e2b6e3554d..3cbe7bba6c 100644 --- a/orchagent/dash/dashorch.h +++ b/orchagent/dash/dashorch.h @@ -17,6 +17,7 @@ #include "timer.h" #include "zmqorch.h" #include "zmqserver.h" +#include "flex_counter_manager.h" #include "dash_api/appliance.pb.h" #include "dash_api/route_type.pb.h" @@ -24,13 +25,22 @@ #include "dash_api/qos.pb.h" #include "dash_api/eni_route.pb.h" +#define ENI_STAT_COUNTER_FLEX_COUNTER_GROUP "ENI_STAT_COUNTER" +#define ENI_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 + struct EniEntry { sai_object_id_t eni_id; dash::eni::Eni metadata; }; -typedef std::map ApplianceTable; +struct ApplianceEntry +{ + sai_object_id_t appliance_id; + dash::appliance::Appliance metadata; +}; + +typedef std::map ApplianceTable; typedef std::map RoutingTypeTable; typedef std::map EniTable; typedef std::map QosTable; @@ -42,6 +52,7 @@ class DashOrch : public ZmqOrch DashOrch(swss::DBConnector *db, 
std::vector &tables, swss::ZmqServer *zmqServer); const EniEntry *getEni(const std::string &eni) const; bool getRouteTypeActions(dash::route_type::RoutingType routing_type, dash::route_type::RouteType& route_type); + void handleFCStatusUpdate(bool is_enabled); private: ApplianceTable appliance_entries_; @@ -71,4 +82,20 @@ class DashOrch : public ZmqOrch bool removeQosEntry(const std::string& qos_name); bool setEniRoute(const std::string& eni, const dash::eni_route::EniRoute& entry); bool removeEniRoute(const std::string& eni); + +private: + std::map m_eni_stat_work_queue; + FlexCounterManager m_eni_stat_manager; + bool m_eni_fc_status = false; + std::unordered_set m_counter_stats; + std::unique_ptr m_eni_name_table; + std::unique_ptr m_vid_to_rid_table; + std::shared_ptr m_counter_db; + std::shared_ptr m_asic_db; + swss::SelectableTimer* m_fc_update_timer = nullptr; + + void doTask(swss::SelectableTimer&); + void addEniToFC(sai_object_id_t oid, const std::string& name); + void removeEniFromFC(sai_object_id_t oid, const std::string& name); + void clearEniFCStats(); }; diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index c29c4b9d6e..635c757601 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -49,6 +49,7 @@ const unordered_map FlexCounterManager::counter_id_field_lo { CounterType::TUNNEL, TUNNEL_COUNTER_ID_LIST }, { CounterType::HOSTIF_TRAP, FLOW_COUNTER_ID_LIST }, { CounterType::ROUTE, FLOW_COUNTER_ID_LIST }, + { CounterType::ENI, ENI_COUNTER_ID_LIST }, }; FlexManagerDirectory g_FlexManagerDirectory; diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h index 80a9e606e6..b9e6e4c487 100644 --- a/orchagent/flex_counter/flex_counter_manager.h +++ b/orchagent/flex_counter/flex_counter_manager.h @@ -32,6 +32,7 @@ enum class CounterType TUNNEL, HOSTIF_TRAP, ROUTE, + ENI }; // 
FlexCounterManager allows users to manage a group of flex counters. diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 19302face9..705622bfa0 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -13,6 +13,7 @@ #include #include "routeorch.h" #include "macsecorch.h" +#include "dash/dashorch.h" #include "flowcounterrouteorch.h" extern sai_port_api_t *sai_port_api; @@ -39,6 +40,7 @@ extern sai_object_id_t gSwitchId; #define TUNNEL_KEY "TUNNEL" #define FLOW_CNT_TRAP_KEY "FLOW_CNT_TRAP" #define FLOW_CNT_ROUTE_KEY "FLOW_CNT_ROUTE" +#define ENI_KEY "ENI" unordered_map flexCounterGroupMap = { @@ -61,6 +63,7 @@ unordered_map flexCounterGroupMap = {"MACSEC_SA", COUNTERS_MACSEC_SA_GROUP}, {"MACSEC_SA_ATTR", COUNTERS_MACSEC_SA_ATTR_GROUP}, {"MACSEC_FLOW", COUNTERS_MACSEC_FLOW_GROUP}, + {"ENI", ENI_STAT_COUNTER_FLEX_COUNTER_GROUP} }; @@ -84,6 +87,7 @@ void FlexCounterOrch::doTask(Consumer &consumer) SWSS_LOG_ENTER(); VxlanTunnelOrch* vxlan_tunnel_orch = gDirectory.get(); + DashOrch* dash_orch = gDirectory.get(); if (gPortsOrch && !gPortsOrch->allPortsReady()) { return; @@ -200,6 +204,10 @@ void FlexCounterOrch::doTask(Consumer &consumer) { vxlan_tunnel_orch->generateTunnelCounterMap(); } + if (dash_orch && (key == ENI_KEY)) + { + dash_orch->handleFCStatusUpdate((value == "enable")); + } if (gCoppOrch && (key == FLOW_CNT_TRAP_KEY)) { if (value == "enable") diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index a34a30eb4b..c3340e0cf3 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -14,7 +14,9 @@ extern "C" PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) - : Orch(db, tableNames), m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), + : Orch(db, tableNames), + m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), + m_portOpErrTable(stateDb, 
STATE_PORT_OPER_ERR_TABLE_NAME), port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), port_buffer_drop_stat_manager(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, StatsMode::READ, diff --git a/orchagent/port.h b/orchagent/port.h index 0ae9b97b67..318a60a376 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -10,8 +10,10 @@ extern "C" { #include #include #include +#include #include - +#include +#include #include #include @@ -74,6 +76,42 @@ struct SystemLagInfo int32_t spa_id = 0; }; +class PortOperErrorEvent +{ +public: + PortOperErrorEvent() = default; + PortOperErrorEvent(const sai_port_error_status_t error, std::string key) : m_errorFlag(error), m_dbKeyError(key){} + ~PortOperErrorEvent() = default; + + inline void incrementErrorCount(void) { m_errorCount++; } + + inline size_t getErrorCount(void) const { return m_errorCount; } + + void recordEventTime(void) { + auto now = std::chrono::system_clock::now(); + m_eventTime = std::chrono::system_clock::to_time_t(now); + } + + std::string getEventTime(void) { + std::ostringstream oss; + oss << std::put_time(std::gmtime(&m_eventTime), "%Y-%m-%d %H:%M:%S"); + return oss.str(); + } + + inline std::string getDbKey(void) const { return m_dbKeyError; } + + // Returns true if port oper error flag in sai_port_error_status_t is set + bool isErrorSet(sai_port_error_status_t errstatus) const { return (m_errorFlag & errstatus);} + + static const std::unordered_map db_key_errors; + +private: + sai_port_error_status_t m_errorFlag = SAI_PORT_ERROR_STATUS_CLEAR; + size_t m_errorCount = 0; + std::string m_dbKeyError; // DB key for this port error + std::time_t m_eventTime = 0; +}; + class Port { public: @@ -155,6 +193,7 @@ class Port sai_object_id_t m_parent_port_id = 0; uint32_t m_dependency_bitmap = 0; sai_port_oper_status_t m_oper_status = SAI_PORT_OPER_STATUS_UNKNOWN; + sai_port_error_status_t m_oper_error_status = SAI_PORT_ERROR_STATUS_CLEAR; //Bitmap of last 
port oper error status std::set m_members; std::set m_child_ports; std::vector m_queue_ids; @@ -193,6 +232,9 @@ class Port sai_object_id_t m_system_side_id = 0; sai_object_id_t m_line_side_id = 0; + /* Port oper error status to event map*/ + std::unordered_map m_portOperErrorToEvent; + /* pre-emphasis */ std::map> m_preemphasis; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index ba5336b063..2ce9b31b6f 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -308,6 +308,25 @@ static char* hostif_vlan_tag[] = { [SAI_HOSTIF_VLAN_TAG_ORIGINAL] = "SAI_HOSTIF_VLAN_TAG_ORIGINAL" }; +const std::unordered_map PortOperErrorEvent::db_key_errors = +{ + // SAI port oper error status to error name mapping + { SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT, "mac_local_fault"}, + { SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT, "mac_remote_fault"}, + { SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS, "fec_sync_loss"}, + { SAI_PORT_ERROR_STATUS_FEC_LOSS_ALIGNMENT_MARKER, "fec_alignment_loss"}, + { SAI_PORT_ERROR_STATUS_HIGH_SER, "high_ser_error"}, + { SAI_PORT_ERROR_STATUS_HIGH_BER, "high_ber_error"}, + { SAI_PORT_ERROR_STATUS_CRC_RATE, "crc_rate"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_CRC_ERROR, "data_unit_crc_error"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_SIZE, "data_unit_size"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_MISALIGNMENT_ERROR, "data_unit_misalignment_error"}, + { SAI_PORT_ERROR_STATUS_CODE_GROUP_ERROR, "code_group_error"}, + { SAI_PORT_ERROR_STATUS_SIGNAL_LOCAL_ERROR, "signal_local_error"}, + { SAI_PORT_ERROR_STATUS_NO_RX_REACHABILITY, "no_rx_reachability"} +}; + + // functions ---------------------------------------------------------------------------------------------------------- static bool isValidPortTypeForLagMember(const Port& port) @@ -509,6 +528,7 @@ bool PortsOrch::checkPathTracingCapability() PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) : Orch(db, tableNames), m_portStateTable(stateDb, 
STATE_PORT_TABLE_NAME), + m_portOpErrTable(stateDb, STATE_PORT_OPER_ERR_TABLE_NAME), port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), gb_port_stat_manager(true, PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, @@ -808,6 +828,26 @@ void PortsOrch::initializeCpuPort() SWSS_LOG_NOTICE("Get CPU port pid:%" PRIx64, this->m_cpuPort.m_port_id); } +// Creating mapping of various port oper errors for error handling +void PortsOrch::initializePortOperErrors(Port &port) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("Initialize port oper errors for port %s", port.m_alias.c_str()); + + for (auto& error : PortOperErrorEvent::db_key_errors) + { + const sai_port_error_status_t error_status = error.first; + std::string error_name = error.second; + + port.m_portOperErrorToEvent[error_status] = PortOperErrorEvent(error_status, error_name); + SWSS_LOG_NOTICE("Initialize port %s error %s flag=0x%" PRIx32, + port.m_alias.c_str(), + error_name.c_str(), + error_status); + } +} + void PortsOrch::initializePorts() { SWSS_LOG_ENTER(); @@ -3351,6 +3391,26 @@ void PortsOrch::updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus m_portTable->set(port.m_alias, tuples); } +void PortsOrch::updateDbPortOperError(Port& port, PortOperErrorEvent *pevent) +{ + SWSS_LOG_ENTER(); + + auto key = pevent->getDbKey(); + vector tuples; + FieldValueTuple tup1("oper_error_status", std::to_string(port.m_oper_error_status)); + tuples.push_back(tup1); + + size_t count = pevent->getErrorCount(); + FieldValueTuple tup2(key + "_count", std::to_string(count)); + tuples.push_back(tup2); + + auto time = pevent->getEventTime(); + FieldValueTuple tup3(key + "_time", time); + tuples.push_back(tup3); + + m_portOpErrTable.set(port.m_alias, tuples); +} + void PortsOrch::updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const { SWSS_LOG_ENTER(); @@ -4613,6 +4673,8 @@ void PortsOrch::doPortTask(Consumer &consumer) /* 
create host_tx_ready field in state-db */ initHostTxReadyState(p); + initializePortOperErrors(p); + // Restore admin status if the port was brought down if (admin_status != p.m_admin_state_up) { @@ -8019,12 +8081,14 @@ void PortsOrch::doTask(NotificationConsumer &consumer) for (uint32_t i = 0; i < count; i++) { + Port port; sai_object_id_t id = portoperstatus[i].port_id; sai_port_oper_status_t status = portoperstatus[i].port_state; + sai_port_error_status_t port_oper_err = portoperstatus[i].port_error_status; - SWSS_LOG_NOTICE("Get port state change notification id:%" PRIx64 " status:%d", id, status); - - Port port; + SWSS_LOG_NOTICE("Get port state change notification id:%" PRIx64 " status:%d " + "oper_error_status:0x%" PRIx32, + id, status, port_oper_err); if (!getPort(id, port)) { @@ -8061,6 +8125,11 @@ void PortsOrch::doTask(NotificationConsumer &consumer) { updateDbPortOperFec(port, "N/A"); } + } else { + if (port_oper_err) + { + updatePortErrorStatus(port, port_oper_err); + } } /* update m_portList */ @@ -8089,6 +8158,53 @@ void PortsOrch::doTask(NotificationConsumer &consumer) } +void PortsOrch::updatePortErrorStatus(Port &port, sai_port_error_status_t errstatus) +{ + size_t errors = 0; + string db_port_error_name; + PortOperErrorEvent *portOperErrorEvent = nullptr; + size_t error_count = PortOperErrorEvent::db_key_errors.size(); + + SWSS_LOG_NOTICE("Port %s error state set from 0x%" PRIx32 "-> 0x%" PRIx32, + port.m_alias.c_str(), + port.m_oper_error_status, + errstatus); + + port.m_oper_error_status = errstatus; + + // Iterate through all the port oper errors + while ((errstatus >> errors) && (errors < error_count)) + { + sai_port_error_status_t error_status = static_cast(errstatus & (1 << errors)); + + if (port.m_portOperErrorToEvent.find(error_status) == port.m_portOperErrorToEvent.end()) + { + ++errors; + continue; + } + + portOperErrorEvent = &port.m_portOperErrorToEvent[error_status]; + + if (portOperErrorEvent->isErrorSet(errstatus)) + { + 
SWSS_LOG_NOTICE("Port %s oper error event: %s occurred", + port.m_alias.c_str(), + portOperErrorEvent->getDbKey().c_str()); + portOperErrorEvent->recordEventTime(); + portOperErrorEvent->incrementErrorCount(); + updateDbPortOperError(port, portOperErrorEvent); + } + else + { + SWSS_LOG_WARN("Port %s port oper error %s not updated in DB", + port.m_alias.c_str(), + portOperErrorEvent->getDbKey().c_str()); + } + + ++errors; + } +} + void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) { SWSS_LOG_NOTICE("Port %s oper state set from %s to %s", @@ -9094,7 +9210,7 @@ bool PortsOrch::addSystemPorts() vector keys; vector spFv; - DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector appDb("APPL_DB", 0); Table appSystemPortTable(&appDb, APP_SYSTEM_PORT_TABLE_NAME); //Retrieve system port configurations from APP DB diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 3ae283fb80..ad2bc85b48 100644 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -146,12 +146,14 @@ class PortsOrch : public Orch, public Subject void setPort(string alias, Port port); void getCpuPort(Port &port); void initHostTxReadyState(Port &port); + void initializePortOperErrors(Port &port); bool getInbandPort(Port &port); bool getVlanByVlanId(sai_vlan_id_t vlan_id, Port &vlan); bool setHostIntfsOperStatus(const Port& port, bool up) const; void updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const; void updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus); + void updateDbPortOperError(Port& port, PortOperErrorEvent *pevent); bool createVlanHostIntf(Port& vl, string hostif_name); bool removeVlanHostIntf(Port vl); @@ -263,6 +265,7 @@ class PortsOrch : public Orch, public Subject unique_ptr
m_pgIndexTable; unique_ptr
m_stateBufferMaximumValueTable; Table m_portStateTable; + Table m_portOpErrTable; std::string getQueueWatermarkFlexCounterTableKey(std::string s); std::string getPriorityGroupWatermarkFlexCounterTableKey(std::string s); @@ -502,6 +505,7 @@ class PortsOrch : public Orch, public Subject bool initGearboxPort(Port &port); bool getPortOperFec(const Port& port, sai_port_fec_mode_t &fec_mode) const; void updateDbPortOperFec(Port &port, string fec_str); + void updatePortErrorStatus(Port &port, sai_port_error_status_t port_oper_eror); map m_recircPortRole; diff --git a/orchagent/rif_rates.lua b/orchagent/rif_rates.lua index ebce14442a..8355acbe2b 100644 --- a/orchagent/rif_rates.lua +++ b/orchagent/rif_rates.lua @@ -37,6 +37,10 @@ for i = 1, n do local out_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS') local out_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS') + if not in_octets or not in_pkts or not out_octets or not out_pkts then + return logtable + end + if initialized == "DONE" or initialized == "COUNTERS_LAST" then -- Get old COUNTERS values local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_IN_OCTETS_last') @@ -44,6 +48,10 @@ for i = 1, n do local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS_last') local out_pkts_last = redis.call('HGET', rates_table_name .. ':' .. 
KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS_last') + if not in_octets_last or not in_pkts_last or not out_octets_last or not out_pkts_last then + return logtable + end + -- Calculate new rates values local rx_bps_new = (in_octets - in_octets_last) / delta * 1000 local tx_bps_new = (out_octets - out_octets_last) / delta * 1000 diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 1265364d97..27ac9463cb 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -74,6 +74,7 @@ sai_counter_api_t* sai_counter_api; sai_bfd_api_t* sai_bfd_api; sai_my_mac_api_t* sai_my_mac_api; sai_generic_programmable_api_t* sai_generic_programmable_api; +sai_dash_appliance_api_t* sai_dash_appliance_api; sai_dash_acl_api_t* sai_dash_acl_api; sai_dash_vnet_api_t sai_dash_vnet_api; sai_dash_outbound_ca_to_pa_api_t* sai_dash_outbound_ca_to_pa_api; @@ -221,6 +222,7 @@ void initSaiApi() sai_api_query(SAI_API_BFD, (void **)&sai_bfd_api); sai_api_query(SAI_API_MY_MAC, (void **)&sai_my_mac_api); sai_api_query(SAI_API_GENERIC_PROGRAMMABLE, (void **)&sai_generic_programmable_api); + sai_api_query((sai_api_t)SAI_API_DASH_APPLIANCE, (void**)&sai_dash_appliance_api); sai_api_query((sai_api_t)SAI_API_DASH_ACL, (void**)&sai_dash_acl_api); sai_api_query((sai_api_t)SAI_API_DASH_VNET, (void**)&sai_dash_vnet_api); sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_CA_TO_PA, (void**)&sai_dash_outbound_ca_to_pa_api); @@ -1100,3 +1102,33 @@ void stopFlexCounterPolling(sai_object_id_t switch_oid, sai_switch_api->set_switch_attribute(switch_oid, &attr); } + +/* + Use metadata info of the SAI object to infer all the available stats + Syncd already has logic to filter out the supported stats +*/ +std::vector queryAvailableCounterStats(const sai_object_type_t object_type) +{ + std::vector stat_list; + auto info = sai_metadata_get_object_type_info(object_type); + + if (!info) + { + SWSS_LOG_ERROR("Metadata info query failed, invalid object: %d", object_type); + return stat_list; + } + + 
SWSS_LOG_NOTICE("SAI object %s supports stat type %s", + sai_serialize_object_type(object_type).c_str(), + info->statenum->name); + + auto statenumlist = info->statenum->values; + auto statnumcount = (uint32_t)info->statenum->valuescount; + stat_list.reserve(statnumcount); + + for (uint32_t i = 0; i < statnumcount; i++) + { + stat_list.push_back(static_cast(statenumlist[i])); + } + return stat_list; +} diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index 693fffd742..7334adff35 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -49,3 +49,5 @@ void startFlexCounterPolling(sai_object_id_t switch_oid, const std::string &stats_mode=""); void stopFlexCounterPolling(sai_object_id_t switch_oid, const std::string &key); + +std::vector queryAvailableCounterStats(const sai_object_type_t); diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index b8e0090259..64442c96af 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -120,10 +120,10 @@ SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl Orch(connectors), m_switchTable(switchTable.first, switchTable.second), m_db(db), - m_stateDb(new DBConnector(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0)), + m_stateDb(new DBConnector("STATE_DB", 0)), m_asicSensorsTable(new Table(m_stateDb.get(), ASIC_TEMPERATURE_INFO_TABLE_NAME)), m_sensorsPollerTimer (new SelectableTimer((timespec { .tv_sec = DEFAULT_ASIC_SENSORS_POLLER_INTERVAL, .tv_nsec = 0 }))), - m_stateDbForNotification(new DBConnector(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0)), + m_stateDbForNotification(new DBConnector("STATE_DB", 0)), m_asicSdkHealthEventTable(new Table(m_stateDbForNotification.get(), STATE_ASIC_SDK_HEALTH_EVENT_TABLE_NAME)) { m_restartCheckNotificationConsumer = new NotificationConsumer(db, "RESTARTCHECK"); diff --git a/orchagent/trap_rates.lua b/orchagent/trap_rates.lua index 69b9c5cd3f..a20f62b9d0 100644 --- a/orchagent/trap_rates.lua +++ b/orchagent/trap_rates.lua @@ -36,7 +36,8 @@ 
for i = 1, n do logit(initialized) -- Get new COUNTERS values - local in_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_COUNTER_STAT_PACKETS') + local in_pkts_str = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_COUNTER_STAT_PACKETS') + local in_pkts = tonumber(in_pkts_str) or 0 if initialized == 'DONE' or initialized == 'COUNTERS_LAST' then -- Get old COUNTERS values diff --git a/tests/dash/dash_configs.py b/tests/dash/dash_configs.py index 2f1590ebaa..0635631b41 100644 --- a/tests/dash/dash_configs.py +++ b/tests/dash/dash_configs.py @@ -52,7 +52,8 @@ "sip": { "ipv4": socket.htonl(int(IP(SIP))) }, - "vm_vni": int(VM_VNI) + "vm_vni": int(VM_VNI), + "local_region_id": 10 } VNET_CONFIG = { @@ -168,4 +169,4 @@ ENI_ROUTE_GROUP2_CONFIG = { "group_id": ROUTE_GROUP2, -} \ No newline at end of file +} diff --git a/tests/dash/test_dash_vnet.py b/tests/dash/test_dash_vnet.py index a591b5c9b1..05646cd796 100644 --- a/tests/dash/test_dash_vnet.py +++ b/tests/dash/test_dash_vnet.py @@ -8,6 +8,7 @@ from dash_api.vnet_mapping_pb2 import * from dash_api.route_type_pb2 import * from dash_api.types_pb2 import * +from dvslib.dvs_flex_counter import TestFlexCountersBase from dash_db import * from dash_configs import * @@ -19,14 +20,22 @@ from dvslib.sai_utils import assert_sai_attribute_exists +eni_counter_group_meta = { + 'key': 'ENI', + 'group_name': 'ENI_STAT_COUNTER', + 'name_map': 'COUNTERS_ENI_NAME_MAP', + 'post_test': 'post_eni_counter_test' +} + DVS_ENV = ["HWSKU=DPU-2P"] NUM_PORTS = 2 -class TestDash(object): +class TestDash(TestFlexCountersBase): def test_appliance(self, dash_db: DashDB): self.appliance_id = "100" self.sip = "10.0.0.1" self.vm_vni = "4321" + self.local_region_id = "10" pb = Appliance() pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.sip))) pb.vm_vni = int(self.vm_vni) @@ -54,6 +63,14 @@ def test_vnet(self, dash_db: DashDB): vnet_attr = dash_db.get_asic_db_entry(ASIC_VNET_TABLE, self.vnet_oid) 
assert_sai_attribute_exists("SAI_VNET_ATTR_VNI", vnet_attr, self.vni) + def post_eni_counter_test(self, meta_data): + counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') + + for counter_entry in counters_keys.items(): + self.wait_for_id_list_remove(meta_data['group_name'], counter_entry[0], counter_entry[1]) + self.wait_for_table_empty(meta_data['name_map']) + def test_eni(self, dash_db: DashDB): self.vnet = "Vnet1" self.mac_string = "F4939FEFC47E" @@ -78,6 +95,9 @@ def test_eni(self, dash_db: DashDB): assert_sai_attribute_exists("SAI_ENI_ATTR_VNET_ID", attrs, str(self.vnet_oid)) assert_sai_attribute_exists("SAI_ENI_ATTR_ADMIN_STATE", attrs, "true") + time.sleep(1) + self.verify_flex_counter_flow(dash_db.dvs, eni_counter_group_meta) + eni_addr_maps = dash_db.wait_for_asic_db_keys(ASIC_ENI_ETHER_ADDR_MAP_TABLE) attrs = dash_db.get_asic_db_entry(ASIC_ENI_ETHER_ADDR_MAP_TABLE, eni_addr_maps[0]) assert_sai_attribute_exists("SAI_ENI_ETHER_ADDRESS_MAP_ENTRY_ATTR_ENI_ID", attrs, str(self.eni_oid)) diff --git a/tests/dvslib/dvs_flex_counter.py b/tests/dvslib/dvs_flex_counter.py new file mode 100644 index 0000000000..a2f4e52280 --- /dev/null +++ b/tests/dvslib/dvs_flex_counter.py @@ -0,0 +1,120 @@ +import time + +NUMBER_OF_RETRIES = 10 + +class TestFlexCountersBase(object): + + def setup_dbs(self, dvs): + self.config_db = dvs.get_config_db() + self.flex_db = dvs.get_flex_db() + self.counters_db = dvs.get_counters_db() + self.app_db = dvs.get_app_db() + + def wait_for_table(self, table): + for retry in range(NUMBER_OF_RETRIES): + counters_keys = self.counters_db.db_connection.hgetall(table) + if len(counters_keys) > 0: + return + else: + time.sleep(1) + + assert False, str(table) + " not created in Counters DB" + + def wait_for_table_empty(self, table): + for retry in range(NUMBER_OF_RETRIES): + counters_keys = self.counters_db.db_connection.hgetall(table) + if 
len(counters_keys) == 0: + return + else: + time.sleep(1) + + assert False, str(table) + " is still in Counters DB" + + def wait_for_id_list(self, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) > 0: + return + else: + time.sleep(1) + + assert False, "No ID list for counter " + str(name) + + def wait_for_id_list_remove(self, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) == 0: + return + else: + time.sleep(1) + + assert False, "ID list for counter " + str(name) + " is still there" + + def wait_for_interval_set(self, group, interval): + interval_value = None + for retry in range(NUMBER_OF_RETRIES): + interval_value = self.flex_db.db_connection.hget("FLEX_COUNTER_GROUP_TABLE:" + group, 'POLL_INTERVAL') + if interval_value == interval: + return + else: + time.sleep(1) + + assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + + def set_flex_counter_group_status(self, group, map, status='enable', check_name_map=True): + group_stats_entry = {"FLEX_COUNTER_STATUS": status} + self.config_db.create_entry("FLEX_COUNTER_TABLE", group, group_stats_entry) + if check_name_map: + if status == 'enable': + self.wait_for_table(map) + else: + self.wait_for_table_empty(map) + + def verify_flex_counters_populated(self, map, stat): + counters_keys = self.counters_db.db_connection.hgetall(map) + for counter_entry in counters_keys.items(): + name = counter_entry[0] + oid = counter_entry[1] + self.wait_for_id_list(stat, name, oid) + + def set_flex_counter_group_interval(self, key, group, interval): + group_stats_entry = {"POLL_INTERVAL": interval} + self.config_db.create_entry("FLEX_COUNTER_TABLE", key, group_stats_entry) + 
self.wait_for_interval_set(group, interval) + + def verify_no_flex_counters_tables(self, counter_stat): + counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) + assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" + + def verify_flex_counter_flow(self, dvs, meta_data): + """ + The test will check there are no flex counters tables on FlexCounter DB when the counters are disabled. + After enabling each counter group, the test will check the flow of creating flex counters tables on FlexCounter DB. + For some counter types the MAPS on COUNTERS DB will be created as well after enabling the counter group, this will be also verified on this test. + """ + self.setup_dbs(dvs) + counter_key = meta_data['key'] + counter_stat = meta_data['group_name'] + counter_map = meta_data['name_map'] + pre_test = meta_data.get('pre_test') + post_test = meta_data.get('post_test') + meta_data['dvs'] = dvs + + self.verify_no_flex_counters_tables(counter_stat) + + if pre_test: + if not hasattr(self, pre_test): + assert False, "Test object does not have the method {}".format(pre_test) + cb = getattr(self, pre_test) + cb(meta_data) + + self.set_flex_counter_group_status(counter_key, counter_map) + self.verify_flex_counters_populated(counter_map, counter_stat) + self.set_flex_counter_group_interval(counter_key, counter_stat, '2500') + + if post_test: + if not hasattr(self, post_test): + assert False, "Test object does not have the method {}".format(post_test) + cb = getattr(self, post_test) + cb(meta_data) + diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 999aba00ff..0d698b8451 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -680,6 +680,163 @@ namespace portsorch_test cleanupPorts(gPortsOrch); } + /* + * Test port oper error count + */ + TEST_F(PortsOrchTest, PortOperErrorStatus) + { + Table portTable = 
Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table portTableOpErrState = Table(m_state_db.get(), STATE_PORT_OPER_ERR_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port, expect the oper status is not UP + Port port; + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 0); + + auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + + std::vector errors = { + SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT, + SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT, + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT), + static_cast( + SAI_PORT_ERROR_STATUS_FEC_LOSS_ALIGNMENT_MARKER | + SAI_PORT_ERROR_STATUS_HIGH_SER | + SAI_PORT_ERROR_STATUS_HIGH_BER | + SAI_PORT_ERROR_STATUS_CRC_RATE), + SAI_PORT_ERROR_STATUS_DATA_UNIT_CRC_ERROR, + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_DATA_UNIT_SIZE | + SAI_PORT_ERROR_STATUS_DATA_UNIT_MISALIGNMENT_ERROR), + static_cast( + SAI_PORT_ERROR_STATUS_CODE_GROUP_ERROR | + SAI_PORT_ERROR_STATUS_SIGNAL_LOCAL_ERROR | + SAI_PORT_ERROR_STATUS_NO_RX_REACHABILITY), + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT) + }; + + // mock a redis reply for notification, it notifies that Ethernet0 is going down + for (uint32_t count=0; count < errors.size(); count++) { + sai_port_oper_status_t
oper_status = SAI_PORT_OPER_STATUS_DOWN; + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_port_oper_status_notification_t port_oper_status; + memset(&port_oper_status, 0, sizeof(port_oper_status)); + port_oper_status.port_error_status = errors[count]; + port_oper_status.port_state = oper_status; + port_oper_status.port_id = port.m_port_id; + std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); + std::vector notifyValues; + FieldValueTuple opdata("port_state_change", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + consumer->readData(); + gPortsOrch->doTask(*consumer); + mockReply = nullptr; + gPortsOrch->getPort("Ethernet0", port); + gPortsOrch->updatePortErrorStatus(port, errors[count]); + ASSERT_TRUE(port.m_oper_error_status == errors[count]); + } + + std::vector values; + portTableOpErrState.get("Ethernet0", values); + + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "mac_local_fault_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "2"); + } + else if (fvField(valueTuple) == "mac_remote_fault_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "2"); + } + else if (fvField(valueTuple) == "oper_error_status") + { + ASSERT_TRUE(fvValue(valueTuple) == "3"); + } + else if (fvField(valueTuple) == "fec_sync_loss_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "3"); + } + else if (fvField(valueTuple) == "fec_alignment_loss_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if 
(fvField(valueTuple) == "high_ser_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "high_ber_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "crc_rate_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_crc_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_size_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_misalignment_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "code_group_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "signal_local_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "no_rx_reachability_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + } + + cleanupPorts(gPortsOrch); + } + TEST_F(PortsOrchTest, PortBulkCreateRemove) { auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index f590b7748c..c664f0390e 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -1,11 +1,11 @@ import time import pytest +from dvslib.dvs_flex_counter import TestFlexCountersBase, NUMBER_OF_RETRIES from swsscommon import swsscommon TUNNEL_TYPE_MAP = "COUNTERS_TUNNEL_TYPE_MAP" ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" -NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" counter_group_meta = { @@ -81,64 +81,8 @@ } } -class TestFlexCounters(object): - def setup_dbs(self, dvs): - self.config_db = dvs.get_config_db() - self.flex_db = dvs.get_flex_db() - self.counters_db = dvs.get_counters_db() - self.app_db = dvs.get_app_db() - - def wait_for_table(self, table): - for retry in range(NUMBER_OF_RETRIES): - counters_keys = 
self.counters_db.db_connection.hgetall(table) - if len(counters_keys) > 0: - return - else: - time.sleep(1) - - assert False, str(table) + " not created in Counters DB" - - def wait_for_table_empty(self, table): - for retry in range(NUMBER_OF_RETRIES): - counters_keys = self.counters_db.db_connection.hgetall(table) - if len(counters_keys) == 0: - return - else: - time.sleep(1) - - assert False, str(table) + " is still in Counters DB" - - def wait_for_id_list(self, stat, name, oid): - for retry in range(NUMBER_OF_RETRIES): - id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() - if len(id_list) > 0: - return - else: - time.sleep(1) - - assert False, "No ID list for counter " + str(name) - - def wait_for_id_list_remove(self, stat, name, oid): - for retry in range(NUMBER_OF_RETRIES): - id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() - if len(id_list) == 0: - return - else: - time.sleep(1) - - assert False, "ID list for counter " + str(name) + " is still there" - - def wait_for_interval_set(self, group, interval): - interval_value = None - for retry in range(NUMBER_OF_RETRIES): - interval_value = self.flex_db.db_connection.hget("FLEX_COUNTER_GROUP_TABLE:" + group, 'POLL_INTERVAL') - if interval_value == interval: - return - else: - time.sleep(1) - - assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) +class TestFlexCounters(TestFlexCountersBase): def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): for retry in range(NUMBER_OF_RETRIES): @@ -152,10 +96,6 @@ def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) - def verify_no_flex_counters_tables(self, counter_stat): - counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" 
+ counter_stat) - assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" - def verify_no_flex_counters_tables_after_delete(self, counter_stat): for retry in range(NUMBER_OF_RETRIES): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat + ":") @@ -165,13 +105,6 @@ def verify_no_flex_counters_tables_after_delete(self, counter_stat): time.sleep(1) assert False, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist after removing the entries" - def verify_flex_counters_populated(self, map, stat): - counters_keys = self.counters_db.db_connection.hgetall(map) - for counter_entry in counters_keys.items(): - name = counter_entry[0] - oid = counter_entry[1] - self.wait_for_id_list(stat, name, oid) - def verify_tunnel_type_vxlan(self, meta_data, type_map): counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) for counter_entry in counters_keys.items(): @@ -186,53 +119,13 @@ def verify_only_phy_ports_created(self, meta_data): for port_stat in port_counters_stat_keys: assert port_stat in dict(port_counters_keys.items()).values(), "Non PHY port created on PORT_STAT_COUNTER group: {}".format(port_stat) - def set_flex_counter_group_status(self, group, map, status='enable', check_name_map=True): - group_stats_entry = {"FLEX_COUNTER_STATUS": status} - self.config_db.create_entry("FLEX_COUNTER_TABLE", group, group_stats_entry) - if check_name_map: - if status == 'enable': - self.wait_for_table(map) - else: - self.wait_for_table_empty(map) - - def set_flex_counter_group_interval(self, key, group, interval): - group_stats_entry = {"POLL_INTERVAL": interval} - self.config_db.create_entry("FLEX_COUNTER_TABLE", key, group_stats_entry) - self.wait_for_interval_set(group, interval) - def set_only_config_db_buffers_field(self, value): fvs = {'create_only_config_db_buffers' : value} self.config_db.update_entry("DEVICE_METADATA", "localhost", fvs) 
@pytest.mark.parametrize("counter_type", counter_group_meta.keys()) def test_flex_counters(self, dvs, counter_type): - """ - The test will check there are no flex counters tables on FlexCounter DB when the counters are disabled. - After enabling each counter group, the test will check the flow of creating flex counters tables on FlexCounter DB. - For some counter types the MAPS on COUNTERS DB will be created as well after enabling the counter group, this will be also verified on this test. - """ - self.setup_dbs(dvs) - meta_data = counter_group_meta[counter_type] - counter_key = meta_data['key'] - counter_stat = meta_data['group_name'] - counter_map = meta_data['name_map'] - pre_test = meta_data.get('pre_test') - post_test = meta_data.get('post_test') - meta_data['dvs'] = dvs - - self.verify_no_flex_counters_tables(counter_stat) - - if pre_test: - cb = getattr(self, pre_test) - cb(meta_data) - - self.set_flex_counter_group_status(counter_key, counter_map) - self.verify_flex_counters_populated(counter_map, counter_stat) - self.set_flex_counter_group_interval(counter_key, counter_stat, '2500') - - if post_test: - cb = getattr(self, post_test) - cb(meta_data) + self.verify_flex_counter_flow(dvs, counter_group_meta[counter_type]) def pre_rif_counter_test(self, meta_data): self.config_db.db_connection.hset('INTERFACE|Ethernet0', "NULL", "NULL") diff --git a/tests/test_vnet.py b/tests/test_vnet.py index be08a52c69..caad0b4a25 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -8,1222 +8,7 @@ from swsscommon import swsscommon from pprint import pprint from dvslib.dvs_common import wait_for_result - - -def create_entry(tbl, key, pairs): - fvs = swsscommon.FieldValuePairs(pairs) - tbl.set(key, fvs) - time.sleep(1) - - -def create_entry_tbl(db, table, separator, key, pairs): - tbl = swsscommon.Table(db, table) - create_entry(tbl, key, pairs) - - -def create_entry_pst(db, table, separator, key, pairs): - tbl = swsscommon.ProducerStateTable(db, table) - 
create_entry(tbl, key, pairs) - - -def delete_entry_tbl(db, table, key): - tbl = swsscommon.Table(db, table) - tbl._del(key) - time.sleep(1) - - -def delete_entry_pst(db, table, key): - tbl = swsscommon.ProducerStateTable(db, table) - tbl._del(key) - time.sleep(1) - - -def how_many_entries_exist(db, table): - tbl = swsscommon.Table(db, table) - return len(tbl.getKeys()) - - -def entries(db, table): - tbl = swsscommon.Table(db, table) - return set(tbl.getKeys()) - - -def get_exist_entries(dvs, table): - db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(db, table) - return set(tbl.getKeys()) - - -def get_created_entry(db, table, existed_entries): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - new_entries = list(entries - existed_entries) - assert len(new_entries) == 1, "Wrong number of created entries." - return new_entries[0] - - -def get_all_created_entries(db, table, existed_entries): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - new_entries = list(entries - set(existed_entries)) - assert len(new_entries) >= 0, "Get all could be no new created entries." - new_entries.sort() - return new_entries - - -def get_created_entries(db, table, existed_entries, count): - new_entries = get_all_created_entries(db, table, existed_entries) - assert len(new_entries) == count, "Wrong number of created entries." - return new_entries - - -def get_deleted_entries(db, table, existed_entries, count): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - old_entries = list(existed_entries - entries) - assert len(old_entries) == count, "Wrong number of deleted entries." 
- old_entries.sort() - return old_entries - - -def get_default_vr_id(dvs): - db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER' - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert len(keys) == 1, "Wrong number of virtual routers found" - - return keys[0] - - -def check_object(db, table, key, expected_attributes): - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert key in keys, "The desired key is not presented" - - status, fvs = tbl.get(key) - assert status, "Got an error when get a key" - - assert len(fvs) >= len(expected_attributes), "Incorrect attributes" - - attr_keys = {entry[0] for entry in fvs} - - for name, value in fvs: - if name in expected_attributes: - assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ - (value, name, expected_attributes[name]) - -def check_deleted_object(db, table, key): - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert key not in keys, "The desired key is not removed" - - -def create_vnet_local_routes(dvs, prefix, vnet_name, ifname): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - create_entry_tbl( - conf_db, - "VNET_ROUTE", '|', "%s|%s" % (vnet_name, prefix), - [ - ("ifname", ifname), - ] - ) - - time.sleep(2) - - -def delete_vnet_local_routes(dvs, prefix, vnet_name): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - - delete_entry_pst(app_db, "VNET_ROUTE_TABLE", "%s:%s" % (vnet_name, prefix)) - - time.sleep(2) - - -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) - - -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", 
profile="", primary="", monitoring="", adv_prefix=""): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - attrs = [ - ("endpoint", endpoint), - ] - - if vni: - attrs.append(('vni', vni)) - - if mac: - attrs.append(('mac_address', mac)) - - if ep_monitor: - attrs.append(('endpoint_monitor', ep_monitor)) - - if profile: - attrs.append(('profile', profile)) - - if primary: - attrs.append(('primary', primary)) - - if monitoring: - attrs.append(('monitoring', monitoring)) - - if adv_prefix: - attrs.append(('adv_prefix', adv_prefix)) - - tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") - fvs = swsscommon.FieldValuePairs(attrs) - tbl.set("%s|%s" % (vnet_name, prefix), fvs) - - time.sleep(2) - - -def delete_vnet_routes(dvs, prefix, vnet_name): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - - delete_entry_pst(app_db, "VNET_ROUTE_TUNNEL_TABLE", "%s:%s" % (vnet_name, prefix)) - - time.sleep(2) - - -def create_vlan(dvs, vlan_name, vlan_ids): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - vlan_id = vlan_name[4:] - - # create vlan - create_entry_tbl( - conf_db, - "VLAN", '|', vlan_name, - [ - ("vlanid", vlan_id), - ], - ) - - time.sleep(1) - - vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids) - - check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid, - { - "SAI_VLAN_ATTR_VLAN_ID": vlan_id, - } - ) - - return vlan_oid - - -def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") - - vlan_oid = create_vlan (dvs, vlan_name, vlan_ids) - - # create a vlan member in config db - create_entry_tbl( - conf_db, - "VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname), - [ - ("tagging_mode", "untagged"), - ], - ) - - 
time.sleep(1) - - # create vlan interface in config db - create_entry_tbl( - conf_db, - "VLAN_INTERFACE", '|', vlan_name, - [ - ("vnet_name", vnet_name), - ("proxy_arp", "enabled"), - ], - ) - - #FIXME - This is created by IntfMgr - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - create_entry_pst( - app_db, - "INTF_TABLE", ':', vlan_name, - [ - ("vnet_name", vnet_name), - ("proxy_arp", "enabled"), - ], - ) - time.sleep(2) - - create_entry_tbl( - conf_db, - "VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr), - [ - ("family", "IPv4"), - ], - ) - - time.sleep(2) - - return vlan_oid - - -def delete_vlan_interface(dvs, ifname, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - delete_entry_tbl(conf_db, "VLAN_INTERFACE", "%s|%s" % (ifname, ipaddr)) - - time.sleep(2) - - delete_entry_tbl(conf_db, "VLAN_INTERFACE", ifname) - - time.sleep(2) - - -def create_phy_interface(dvs, ifname, vnet_name, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") - - # create vlan interface in config db - create_entry_tbl( - conf_db, - "INTERFACE", '|', ifname, - [ - ("vnet_name", vnet_name), - ], - ) - - #FIXME - This is created by IntfMgr - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - create_entry_pst( - app_db, - "INTF_TABLE", ':', ifname, - [ - ("vnet_name", vnet_name), - ], - ) - time.sleep(2) - - create_entry_tbl( - conf_db, - "INTERFACE", '|', "%s|%s" % (ifname, ipaddr), - [ - ("family", "IPv4"), - ], - ) - - -def delete_phy_interface(dvs, ifname, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - delete_entry_tbl(conf_db, "INTERFACE", "%s|%s" % (ifname, ipaddr)) - - time.sleep(2) - - delete_entry_tbl(conf_db, "INTERFACE", ifname) - - time.sleep(2) - - -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", 
advertise_prefix=False, overlay_dmac=""): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - attrs = [ - ("vxlan_tunnel", tunnel), - ("vni", vni), - ("peer_list", peer_list), - ] - - if scope: - attrs.append(('scope', scope)) - - if advertise_prefix: - attrs.append(('advertise_prefix', 'true')) - - if overlay_dmac: - attrs.append(('overlay_dmac', overlay_dmac)) - - # create the VXLAN tunnel Term entry in Config DB - create_entry_tbl( - conf_db, - "VNET", '|', name, - attrs, - ) - - time.sleep(2) - - -def delete_vnet_entry(dvs, name): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - delete_entry_tbl(conf_db, "VNET", "%s" % (name)) - - time.sleep(2) - - -def create_vxlan_tunnel(dvs, name, src_ip): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - attrs = [ - ("src_ip", src_ip), - ] - - # create the VXLAN tunnel Term entry in Config DB - create_entry_tbl( - conf_db, - "VXLAN_TUNNEL", '|', name, - attrs, - ) - -def delete_vxlan_tunnel(dvs, name): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - delete_entry_tbl(conf_db, "VXLAN_TUNNEL", name) - -def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - # create the VXLAN tunnel map entry in Config DB - create_entry_tbl( - conf_db, - "VXLAN_TUNNEL_MAP", '|', "%s|%s" % (tunnel_name, tunnel_map_entry_name), - [ - ("vni", vni_id), - ("vlan", vlan), - ], - ) - - -def get_lo(dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - vr_id = get_default_vr_id(dvs) - - tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') - - entries = tbl.getKeys() - lo_id = None - for entry in entries: - status, fvs = tbl.get(entry) - assert status, "Got an error when get a key" - for key, value in 
fvs: - if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': - lo_id = entry - break - else: - assert False, 'Don\'t found loopback id' - - return lo_id - - -def get_switch_mac(dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH') - - entries = tbl.getKeys() - mac = None - for entry in entries: - status, fvs = tbl.get(entry) - assert status, "Got an error when get a key" - for key, value in fvs: - if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS': - mac = value - break - else: - assert False, 'Don\'t found switch mac' - - return mac - - -def check_linux_intf_arp_proxy(dvs, ifname): - (exitcode, out) = dvs.runcmd("cat /proc/sys/net/ipv4/conf/{0}/proxy_arp_pvlan".format(ifname)) - assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" - - -def update_bfd_session_state(dvs, addr, state): - bfd_id = get_bfd_session_id(dvs, addr) - assert bfd_id is not None - - bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", - "Down": "SAI_BFD_SESSION_STATE_DOWN", - "Init": "SAI_BFD_SESSION_STATE_INIT", - "Up": "SAI_BFD_SESSION_STATE_UP"} - - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") - fvp = swsscommon.FieldValuePairs() - ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" - ntf.send("bfd_session_state_change", ntf_data, fvp) - -def update_monitor_session_state(dvs, addr, monitor, state): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - create_entry_tbl( - state_db, - "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), - [ - ("state", state), - ] - ) - -def get_bfd_session_id(dvs, addr): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") - 
entries = set(tbl.getKeys()) - for entry in entries: - status, fvs = tbl.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true": - return entry - - return None - - -def check_del_bfd_session(dvs, addrs): - for addr in addrs: - assert get_bfd_session_id(dvs, addr) is None - - -def check_bfd_session(dvs, addrs): - for addr in addrs: - assert get_bfd_session_id(dvs, addr) is not None - - -def check_state_db_routes(dvs, vnet, prefix, endpoints): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") - - status, fvs = tbl.get(vnet + '|' + prefix) - assert status, "Got an error when get a key" - - fvs = dict(fvs) - assert fvs['active_endpoints'] == ','.join(endpoints) - - if endpoints: - assert fvs['state'] == 'active' - else: - assert fvs['state'] == 'inactive' - - -def check_remove_state_db_routes(dvs, vnet, prefix): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") - keys = tbl.getKeys() - - assert vnet + '|' + prefix not in keys - - -def check_routes_advertisement(dvs, prefix, profile=""): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") - keys = tbl.getKeys() - - assert prefix in keys - - if profile: - status, fvs = tbl.get(prefix) - assert status, "Got an error when get a key" - fvs = dict(fvs) - assert fvs['profile'] == profile - - -def check_remove_routes_advertisement(dvs, prefix): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") - keys = tbl.getKeys() - - assert prefix not in keys - - -def check_syslog(dvs, marker, err_log): - (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE 
{print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) - assert num.strip() == "0" - - -def create_fvs(**kwargs): - return swsscommon.FieldValuePairs(list(kwargs.items())) - - -def create_subnet_decap_tunnel(dvs, tunnel_name, **kwargs): - """Create tunnel and verify all needed entries in state DB exists.""" - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - fvs = create_fvs(**kwargs) - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") - ps.set(tunnel_name, fvs) - - # wait till config will be applied - time.sleep(1) - - # validate the tunnel entry in state db - tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") - - tunnels = tunnel_state_table.getKeys() - for tunnel in tunnels: - status, fvs = tunnel_state_table.get(tunnel) - assert status == True - - for field, value in fvs: - if field == "tunnel_type": - assert value == "IPINIP" - elif field == "dscp_mode": - assert value == kwargs["dscp_mode"] - elif field == "ecn_mode": - assert value == kwargs["ecn_mode"] - elif field == "ttl_mode": - assert value == kwargs["ttl_mode"] - elif field == "encap_ecn_mode": - assert value == kwargs["encap_ecn_mode"] - else: - assert False, "Field %s is not tested" % field - - -def delete_subnet_decap_tunnel(dvs, tunnel_name): - """Delete tunnel and checks that state DB is cleared.""" - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tunnel_app_table = swsscommon.Table(appdb, "TUNNEL_DECAP_TABLE") - tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") - - ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") - ps._del(tunnel_name) - - # wait till config will be applied - time.sleep(1) - - assert len(tunnel_app_table.getKeys()) == 0 - assert len(tunnel_state_table.getKeys()) == 0 - - 
-loopback_id = 0 -def_vr_id = 0 -switch_mac = None - -def update_bgp_global_dev_state(dvs, state): - config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - create_entry_tbl( - config_db, - "BGP_DEVICE_GLOBAL",'|',"STATE", - [ - ("tsa_enabled", state), - ] - ) - -def set_tsa(dvs): - update_bgp_global_dev_state(dvs, "true") - -def clear_tsa(dvs): - update_bgp_global_dev_state(dvs, "false") - -class VnetVxlanVrfTunnel(object): - - ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" - ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP" - ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" - ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" - ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" - ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" - ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" - ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" - ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" - ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" - ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" - ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" - APP_VNET_MONITOR = "VNET_MONITOR_TABLE" - - ecn_modes_map = { - "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", - "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" - } - - dscp_modes_map = { - "pipe" : "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL", - "uniform" : "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL" - } - - ttl_modes_map = { - "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", - "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" - } - - def __init__(self): - self.tunnel_map_ids = set() - self.tunnel_map_entry_ids = set() - self.tunnel_ids = set() - self.tunnel_term_ids = set() - self.ipinip_tunnel_term_ids = {} - self.tunnel_map_map = {} - self.tunnel = {} - self.vnet_vr_ids = set() - self.vr_map = {} - self.nh_ids = {} - self.nhg_ids = {} - - def 
fetch_exist_entries(self, dvs): - self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) - self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE) - self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP) - self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY) - self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY) - self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE) - self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) - self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) - self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) - self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) - - global loopback_id, def_vr_id, switch_mac - if not loopback_id: - loopback_id = get_lo(dvs) - - if not def_vr_id: - def_vr_id = get_default_vr_id(dvs) - - if switch_mac is None: - switch_mac = get_switch_mac(dvs) - - def check_ipinip_tunnel(self, dvs, tunnel_name, dscp_mode, ecn_mode, ttl_mode): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) - tunnel_attrs = { - 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', - 'SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE': self.dscp_modes_map[dscp_mode], - 'SAI_TUNNEL_ATTR_ENCAP_ECN_MODE': self.ecn_modes_map[ecn_mode], - 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': self.ttl_modes_map[ttl_mode] - } - check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, tunnel_attrs) - - self.tunnel_ids.add(tunnel_id) - self.tunnel[tunnel_name] = tunnel_id - - def check_del_ipinip_tunnel(self, dvs, tunnel_name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - tunnel_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1)[0] - check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id) - self.tunnel_ids.remove(tunnel_id) - assert tunnel_id == self.tunnel[tunnel_name] - self.tunnel.pop(tunnel_name) - - def 
check_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - dst_ip = ipaddress.ip_network(dst_ip) - src_ip = ipaddress.ip_network(src_ip) - tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) - tunnel_term_attrs = { - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': str(dst_ip.network_address), - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK': str(dst_ip.netmask), - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP': str(src_ip.network_address), - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK': str(src_ip.netmask), - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': self.tunnel[tunnel_name] - } - check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, tunnel_term_attrs) - - self.tunnel_term_ids.add(tunnel_term_id) - self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] = tunnel_term_id - - def check_del_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - dst_ip = ipaddress.ip_network(dst_ip) - src_ip = ipaddress.ip_network(src_ip) - tunnel_term_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids, 1)[0] - check_deleted_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id) - self.tunnel_term_ids.remove(tunnel_term_id) - assert self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] == tunnel_term_id - self.ipinip_tunnel_term_ids.pop((tunnel_name, src_ip, dst_ip)) - - def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - global loopback_id, def_vr_id - - tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - tunnel_id = 
get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) - tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) - - # check that the vxlan tunnel termination are there - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" - - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[2], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[3], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VNI', - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, - { - 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', - 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, - 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[0], tunnel_map_id[2]), - 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[1], tunnel_map_id[3]), - 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, - } - ) - - expected_attributes = { - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id, - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip, - 
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id, - } - - check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes) - - self.tunnel_map_ids.update(tunnel_map_id) - self.tunnel_ids.add(tunnel_id) - self.tunnel_term_ids.add(tunnel_term_id) - self.tunnel_map_map[tunnel_name] = tunnel_map_id - self.tunnel[tunnel_name] = tunnel_id - - def check_del_vxlan_tunnel(self, dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) - check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) - self.tunnel_ids.remove(old_tunnel[0]) - - old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - for old_tunnel_map in old_tunnel_maps: - check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) - self.tunnel_map_ids.remove(old_tunnel_map) - - def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - - time.sleep(2) - - if (self.tunnel_map_map.get(tunnel_name) is None): - tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - else: - tunnel_map_id = self.tunnel_map_map[tunnel_name] - - tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) - - # check that the vxlan tunnel termination are there - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" - - check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], - { - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - 
'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'), - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], - { - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'), - } - ) - - self.tunnel_map_entry_ids.update(tunnel_map_entry_id) - - def check_vnet_entry(self, dvs, name, peer_list=[]): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - - #Assert if there are linklocal entries - tbl = swsscommon.Table(app_db, "VNET_ROUTE_TUNNEL_TABLE") - route_entries = tbl.getKeys() - assert "ff00::/8" not in route_entries - assert "fe80::/64" not in route_entries - - #Check virtual router objects - assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 1),\ - "The VR objects are not created" - - new_vr_ids = get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 1) - - self.vnet_vr_ids.update(new_vr_ids) - self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[0], 'peer':peer_list } - - def check_default_vnet_entry(self, dvs, name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - #Check virtual router objects - assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids)),\ - "Some VR objects are created" - #Mappers for default VNET is created with default VR objects. 
- self.vr_map[name] = { 'ing':list(self.vnet_vr_ids)[0], 'egr':list(self.vnet_vr_ids)[0], 'peer':[] } - - def check_del_vnet_entry(self, dvs, name): - # TODO: Implement for VRF VNET - return True - - def vnet_route_ids(self, dvs, name, local=False): - vr_set = set() - - vr_set.add(self.vr_map[name].get('ing')) - - try: - for peer in self.vr_map[name].get('peer'): - vr_set.add(self.vr_map[peer].get('ing')) - except IndexError: - pass - - return vr_set - - def check_router_interface(self, dvs, intf_name, name, vlan_oid=0): - # Check RIF in ingress VRF - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - global switch_mac - - expected_attr = { - "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'), - "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac, - "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", - } - - if vlan_oid: - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'}) - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid}) - else: - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'}) - - new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs) - check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr) - - #IP2ME route will be created with every router interface - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, 1) - - if vlan_oid: - expected_attr = { 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } - check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) - - expected_attr = { 'SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } - check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) - - check_linux_intf_arp_proxy(dvs, intf_name) - - self.rifs.add(new_rif) - self.routes.update(new_route) - - def check_del_router_interface(self, dvs, name): - asic_db = 
swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - old_rif = get_deleted_entries(asic_db, self.ASIC_RIF_TABLE, self.rifs, 1) - check_deleted_object(asic_db, self.ASIC_RIF_TABLE, old_rif[0]) - - self.rifs.remove(old_rif[0]) - - def check_vnet_local_routes(self, dvs, name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - vr_ids = self.vnet_route_ids(dvs, name, True) - count = len(vr_ids) - - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - - #Routes are not replicated to egress VRF, return if count is 0, else check peering - if not count: - return - - asic_vrs = set() - for idx in range(count): - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) - - assert asic_vrs == vr_ids - - self.routes.update(new_route) - - def check_del_vnet_local_routes(self, dvs, name): - # TODO: Implement for VRF VNET - return True - - def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0, route_ids=""): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - - vr_ids = self.vnet_route_ids(dvs, name) - count = len(vr_ids) - - # Check routes in ingress VRF - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } - - if vni: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) - - if mac: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) - - if endpoint in self.nh_ids: - new_nh = self.nh_ids[endpoint] - else: - new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops) - self.nh_ids[endpoint] = new_nh - self.nhops.add(new_nh) - - check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr) - if not route_ids: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - else: - new_route = route_ids - - #Check if the route is in expected VRF - asic_vrs = set() - for idx in range(count): 
- check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh, - } - ) - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) - - assert asic_vrs == vr_ids - - self.routes.update(new_route) - - return new_route - - def serialize_endpoint_group(self, endpoints): - endpoints.sort() - return ",".join(endpoints) - - def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): - expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) - tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) - entries = set(tbl_nhgm.getKeys()) - endpoints = [] - for entry in entries: - status, fvs = tbl_nhgm.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: - nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] - status, nh_fvs = tbl_nh.get(nh_key) - nh_fvs = dict(nh_fvs) - assert status, "Got an error when get a key" - endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] - endpoints.append(endpoint) - assert endpoint in expected_attrs - if ordered_ecmp == "true": - assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] - del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] - else: - assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None - - check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) - - assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str - - def get_nexthop_groups(self, dvs, nhg): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) - tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) 
- nhg_data = {} - nhg_data['id'] = nhg - entries = set(tbl_nhgm.getKeys()) - nhg_data['endpoints'] = [] - for entry in entries: - status, fvs = tbl_nhgm.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: - nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] - status, nh_fvs = tbl_nh.get(nh_key) - nh_fvs = dict(nh_fvs) - assert status, "Got an error when get a key" - endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] - nhg_data['endpoints'].append(endpoint) - return nhg_data - def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) - - vr_ids = self.vnet_route_ids(dvs, name) - count = len(vr_ids) - - expected_attrs = {} - for idx, endpoint in enumerate(endpoints): - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } - if vni and vni[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) - if mac and mac[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) - if ordered_ecmp == "true" and nh_seq_id: - expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) - expected_attrs[endpoint] = expected_attr - - if nhg: - new_nhg = nhg - elif endpoint_str in self.nhg_ids: - new_nhg = self.nhg_ids[endpoint_str] - else: - new_nhg = get_created_entry(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) - self.nhg_ids[endpoint_str] = new_nhg - self.nhgs.add(new_nhg) - - - # Check routes in ingress VRF - expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", - 
} - check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) - - # Check nexthop group member - self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) - - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - - #Check if the route is in expected VRF - asic_vrs = set() - for idx in range(count): - check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nhg, - } - ) - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) - - assert asic_vrs == vr_ids - - self.routes.update(new_route) - - return new_route, new_nhg - - def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) - new_nhgs = [] - expected_attrs_primary = {} - for idx, endpoint in enumerate(endpoints_primary): - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } - if vni and vni[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) - if mac and mac[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) - expected_attrs_primary[endpoint] = expected_attr - - if len(endpoints_primary) == 1: - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - return new_route - else : - new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) - found_match = False - - for nhg in new_nhgs: - nhg_data = self.get_nexthop_groups(dvs, nhg) - eplist = self.serialize_endpoint_group(nhg_data['endpoints']) - if eplist == 
self.serialize_endpoint_group(endpoints_primary): - self.nhg_ids[endpoint_str_primary] = nhg - found_match = True - - assert found_match, "the expected Nexthop group was not found." - - # Check routes in ingress VRF - expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", - } - for nhg in new_nhgs: - check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) - - # Check nexthop group member - self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) - - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - - #Check if the route is in expected VRF - active_nhg = self.nhg_ids[endpoint_str_primary] - for idx in range(count): - if prefix != "" and prefix not in new_route[idx] : - continue - check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, - } - ) - rt_key = json.loads(new_route[idx]) - - - self.routes.update(new_route) - del self.nhg_ids[endpoint_str_primary] - return new_route - - def check_del_vnet_routes(self, dvs, name, prefixes=[]): - # TODO: Implement for VRF VNET - - def _access_function(): - route_entries = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) - route_prefixes = [json.loads(route_entry)["dest"] for route_entry in route_entries] - return (all(prefix not in route_prefixes for prefix in prefixes), None) - - if prefixes: - wait_for_result(_access_function) - - return True - - def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - key = endpoint + ':' + prefix - check_object(app_db, self.APP_VNET_MONITOR, key, - { - "packet_type": packet_type, - "overlay_dmac" : overlay_dmac - } - ) - return True - - def check_custom_monitor_deleted(self, dvs, prefix, endpoint): - app_db = 
swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - key = endpoint + ':' + prefix - check_deleted_object(app_db, self.APP_VNET_MONITOR, key) +from vnet_lib import * class TestVnetOrch(object): diff --git a/tests/vnet_lib.py b/tests/vnet_lib.py new file mode 100644 index 0000000000..6bdb2cda97 --- /dev/null +++ b/tests/vnet_lib.py @@ -0,0 +1,1226 @@ +import time +import ipaddress +import json +import time + +from swsscommon import swsscommon +from pprint import pprint +from dvslib.dvs_common import wait_for_result + + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + time.sleep(1) + + +def create_entry_tbl(db, table, separator, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + + +def create_entry_pst(db, table, separator, key, pairs): + tbl = swsscommon.ProducerStateTable(db, table) + create_entry(tbl, key, pairs) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + +def delete_entry_pst(db, table, key): + tbl = swsscommon.ProducerStateTable(db, table) + tbl._del(key) + time.sleep(1) + + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + + +def entries(db, table): + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_exist_entries(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_created_entry(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == 1, "Wrong number of created entries." 
+ return new_entries[0] + + +def get_all_created_entries(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - set(existed_entries)) + assert len(new_entries) >= 0, "Get all could be no new created entries." + new_entries.sort() + return new_entries + + +def get_created_entries(db, table, existed_entries, count): + new_entries = get_all_created_entries(db, table, existed_entries) + assert len(new_entries) == count, "Wrong number of created entries." + return new_entries + + +def get_deleted_entries(db, table, existed_entries, count): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + old_entries = list(existed_entries - entries) + assert len(old_entries) == count, "Wrong number of deleted entries." + old_entries.sort() + return old_entries + + +def get_default_vr_id(dvs): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER' + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert len(keys) == 1, "Wrong number of virtual routers found" + + return keys[0] + + +def check_object(db, table, key, expected_attributes): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not presented" + + status, fvs = tbl.get(key) + assert status, "Got an error when get a key" + + assert len(fvs) >= len(expected_attributes), "Incorrect attributes" + + attr_keys = {entry[0] for entry in fvs} + + for name, value in fvs: + if name in expected_attributes: + assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ + (value, name, expected_attributes[name]) + +def check_deleted_object(db, table, key): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key not in keys, "The desired key is not removed" + + +def create_vnet_local_routes(dvs, prefix, vnet_name, ifname): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, 
dvs.redis_sock, 0) + + create_entry_tbl( + conf_db, + "VNET_ROUTE", '|', "%s|%s" % (vnet_name, prefix), + [ + ("ifname", ifname), + ] + ) + + time.sleep(2) + + +def delete_vnet_local_routes(dvs, prefix, vnet_name): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + delete_entry_pst(app_db, "VNET_ROUTE_TABLE", "%s:%s" % (vnet_name, prefix)) + + time.sleep(2) + + +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) + + +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + attrs = [ + ("endpoint", endpoint), + ] + + if vni: + attrs.append(('vni', vni)) + + if mac: + attrs.append(('mac_address', mac)) + + if ep_monitor: + attrs.append(('endpoint_monitor', ep_monitor)) + + if profile: + attrs.append(('profile', profile)) + + if primary: + attrs.append(('primary', primary)) + + if monitoring: + attrs.append(('monitoring', monitoring)) + + if adv_prefix: + attrs.append(('adv_prefix', adv_prefix)) + + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") + fvs = swsscommon.FieldValuePairs(attrs) + tbl.set("%s|%s" % (vnet_name, prefix), fvs) + + time.sleep(2) + + +def delete_vnet_routes(dvs, prefix, vnet_name): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + delete_entry_pst(app_db, "VNET_ROUTE_TUNNEL_TABLE", "%s:%s" % (vnet_name, prefix)) + + time.sleep(2) + + +def create_vlan(dvs, vlan_name, vlan_ids): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + vlan_id = vlan_name[4:] + + # create vlan + 
create_entry_tbl( + conf_db, + "VLAN", '|', vlan_name, + [ + ("vlanid", vlan_id), + ], + ) + + time.sleep(1) + + vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids) + + check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid, + { + "SAI_VLAN_ATTR_VLAN_ID": vlan_id, + } + ) + + return vlan_oid + + +def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + + vlan_oid = create_vlan (dvs, vlan_name, vlan_ids) + + # create a vlan member in config db + create_entry_tbl( + conf_db, + "VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname), + [ + ("tagging_mode", "untagged"), + ], + ) + + time.sleep(1) + + # create vlan interface in config db + create_entry_tbl( + conf_db, + "VLAN_INTERFACE", '|', vlan_name, + [ + ("vnet_name", vnet_name), + ("proxy_arp", "enabled"), + ], + ) + + #FIXME - This is created by IntfMgr + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_entry_pst( + app_db, + "INTF_TABLE", ':', vlan_name, + [ + ("vnet_name", vnet_name), + ("proxy_arp", "enabled"), + ], + ) + time.sleep(2) + + create_entry_tbl( + conf_db, + "VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr), + [ + ("family", "IPv4"), + ], + ) + + time.sleep(2) + + return vlan_oid + + +def delete_vlan_interface(dvs, ifname, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "VLAN_INTERFACE", "%s|%s" % (ifname, ipaddr)) + + time.sleep(2) + + delete_entry_tbl(conf_db, "VLAN_INTERFACE", ifname) + + time.sleep(2) + + +def create_phy_interface(dvs, ifname, vnet_name, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + + # create vlan interface in config db + create_entry_tbl( + conf_db, + 
"INTERFACE", '|', ifname, + [ + ("vnet_name", vnet_name), + ], + ) + + #FIXME - This is created by IntfMgr + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_entry_pst( + app_db, + "INTF_TABLE", ':', ifname, + [ + ("vnet_name", vnet_name), + ], + ) + time.sleep(2) + + create_entry_tbl( + conf_db, + "INTERFACE", '|', "%s|%s" % (ifname, ipaddr), + [ + ("family", "IPv4"), + ], + ) + + +def delete_phy_interface(dvs, ifname, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "INTERFACE", "%s|%s" % (ifname, ipaddr)) + + time.sleep(2) + + delete_entry_tbl(conf_db, "INTERFACE", ifname) + + time.sleep(2) + + +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + attrs = [ + ("vxlan_tunnel", tunnel), + ("vni", vni), + ("peer_list", peer_list), + ] + + if scope: + attrs.append(('scope', scope)) + + if advertise_prefix: + attrs.append(('advertise_prefix', 'true')) + + if overlay_dmac: + attrs.append(('overlay_dmac', overlay_dmac)) + + # create the VXLAN tunnel Term entry in Config DB + create_entry_tbl( + conf_db, + "VNET", '|', name, + attrs, + ) + + time.sleep(2) + + +def delete_vnet_entry(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "VNET", "%s" % (name)) + + time.sleep(2) + + +def create_vxlan_tunnel(dvs, name, src_ip): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + attrs = [ + ("src_ip", src_ip), + ] + + # create the VXLAN tunnel Term entry in Config DB + create_entry_tbl( + conf_db, + "VXLAN_TUNNEL", '|', name, + attrs, + ) + +def delete_vxlan_tunnel(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_entry_tbl(conf_db, 
"VXLAN_TUNNEL", name) + +def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + # create the VXLAN tunnel map entry in Config DB + create_entry_tbl( + conf_db, + "VXLAN_TUNNEL_MAP", '|', "%s|%s" % (tunnel_name, tunnel_map_entry_name), + [ + ("vni", vni_id), + ("vlan", vlan), + ], + ) + + +def get_lo(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + vr_id = get_default_vr_id(dvs) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + + entries = tbl.getKeys() + lo_id = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': + lo_id = entry + break + else: + assert False, 'Don\'t found loopback id' + + return lo_id + + +def get_switch_mac(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH') + + entries = tbl.getKeys() + mac = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS': + mac = value + break + else: + assert False, 'Don\'t found switch mac' + + return mac + + +def check_linux_intf_arp_proxy(dvs, ifname): + (exitcode, out) = dvs.runcmd("cat /proc/sys/net/ipv4/conf/{0}/proxy_arp_pvlan".format(ifname)) + assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" + + +def update_bfd_session_state(dvs, addr, state): + bfd_id = get_bfd_session_id(dvs, addr) + assert bfd_id is not None + + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + asic_db = 
swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + +def update_monitor_session_state(dvs, addr, monitor, state): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + create_entry_tbl( + state_db, + "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), + [ + ("state", state), + ] + ) + +def get_bfd_session_id(dvs, addr): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") + entries = set(tbl.getKeys()) + for entry in entries: + status, fvs = tbl.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true": + return entry + + return None + + +def check_del_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is None + + +def check_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is not None + + +def check_state_db_routes(dvs, vnet, prefix, endpoints): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + + status, fvs = tbl.get(vnet + '|' + prefix) + assert status, "Got an error when get a key" + + fvs = dict(fvs) + assert fvs['active_endpoints'] == ','.join(endpoints) + + if endpoints: + assert fvs['state'] == 'active' + else: + assert fvs['state'] == 'inactive' + + +def check_remove_state_db_routes(dvs, vnet, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + keys = tbl.getKeys() + + assert vnet + '|' + prefix not 
in keys + + +def check_routes_advertisement(dvs, prefix, profile=""): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix in keys + + if profile: + status, fvs = tbl.get(prefix) + assert status, "Got an error when get a key" + fvs = dict(fvs) + assert fvs['profile'] == profile + + +def check_remove_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix not in keys + + +def check_syslog(dvs, marker, err_log): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() == "0" + + +def create_fvs(**kwargs): + return swsscommon.FieldValuePairs(list(kwargs.items())) + + +def create_subnet_decap_tunnel(dvs, tunnel_name, **kwargs): + """Create tunnel and verify all needed entries in state DB exists.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + fvs = create_fvs(**kwargs) + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps.set(tunnel_name, fvs) + + # wait till config will be applied + time.sleep(1) + + # validate the tunnel entry in state db + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + tunnels = tunnel_state_table.getKeys() + for tunnel in tunnels: + status, fvs = tunnel_state_table.get(tunnel) + assert status == True + + for field, value in fvs: + if field == "tunnel_type": + assert value == "IPINIP" + elif field == "dscp_mode": + assert value == kwargs["dscp_mode"] + elif field == "ecn_mode": + assert value == kwargs["ecn_mode"] + elif field == "ttl_mode": + assert value == kwargs["ttl_mode"] + elif field == 
"encap_ecn_mode": + assert value == kwargs["encap_ecn_mode"] + else: + assert False, "Field %s is not tested" % field + + +def delete_subnet_decap_tunnel(dvs, tunnel_name): + """Delete tunnel and checks that state DB is cleared.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tunnel_app_table = swsscommon.Table(appdb, "TUNNEL_DECAP_TABLE") + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps._del(tunnel_name) + + # wait till config will be applied + time.sleep(1) + + assert len(tunnel_app_table.getKeys()) == 0 + assert len(tunnel_state_table.getKeys()) == 0 + + +loopback_id = 0 +def_vr_id = 0 +switch_mac = None + +def update_bgp_global_dev_state(dvs, state): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + create_entry_tbl( + config_db, + "BGP_DEVICE_GLOBAL",'|',"STATE", + [ + ("tsa_enabled", state), + ] + ) + +def set_tsa(dvs): + update_bgp_global_dev_state(dvs, "true") + +def clear_tsa(dvs): + update_bgp_global_dev_state(dvs, "false") + +class VnetVxlanVrfTunnel(object): + + ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" + ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP" + ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" + ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" + ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" + ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" + ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" + ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + 
APP_VNET_MONITOR = "VNET_MONITOR_TABLE" + + ecn_modes_map = { + "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", + "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" + } + + dscp_modes_map = { + "pipe" : "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL" + } + + ttl_modes_map = { + "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" + } + + def __init__(self): + self.tunnel_map_ids = set() + self.tunnel_map_entry_ids = set() + self.tunnel_ids = set() + self.tunnel_term_ids = set() + self.ipinip_tunnel_term_ids = {} + self.tunnel_map_map = {} + self.tunnel = {} + self.vnet_vr_ids = set() + self.vr_map = {} + self.nh_ids = {} + self.nhg_ids = {} + + def fetch_exist_entries(self, dvs): + self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) + self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE) + self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP) + self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY) + self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY) + self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE) + self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) + self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) + self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) + + global loopback_id, def_vr_id, switch_mac + if not loopback_id: + loopback_id = get_lo(dvs) + + if not def_vr_id: + def_vr_id = get_default_vr_id(dvs) + + if switch_mac is None: + switch_mac = get_switch_mac(dvs) + + def check_ipinip_tunnel(self, dvs, tunnel_name, dscp_mode, ecn_mode, ttl_mode): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) + tunnel_attrs = { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 
'SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE': self.dscp_modes_map[dscp_mode], + 'SAI_TUNNEL_ATTR_ENCAP_ECN_MODE': self.ecn_modes_map[ecn_mode], + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': self.ttl_modes_map[ttl_mode] + } + check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, tunnel_attrs) + + self.tunnel_ids.add(tunnel_id) + self.tunnel[tunnel_name] = tunnel_id + + def check_del_ipinip_tunnel(self, dvs, tunnel_name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id) + self.tunnel_ids.remove(tunnel_id) + assert tunnel_id == self.tunnel[tunnel_name] + self.tunnel.pop(tunnel_name) + + def check_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + tunnel_term_attrs = { + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': str(dst_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK': str(dst_ip.netmask), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP': str(src_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK': str(src_ip.netmask), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': self.tunnel[tunnel_name] + } + check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, tunnel_term_attrs) + + self.tunnel_term_ids.add(tunnel_term_id) + self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] = tunnel_term_id + + def check_del_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, 
dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id) + self.tunnel_term_ids.remove(tunnel_term_id) + assert self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] == tunnel_term_id + self.ipinip_tunnel_term_ids.pop((tunnel_name, src_ip, dst_ip)) + + def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global loopback_id, def_vr_id + + tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) + tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + + # check that the vxlan tunnel termination are there + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[2], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[3], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', + } + ) + + 
check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VNI', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, + { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', + 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, + 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[0], tunnel_map_id[2]), + 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[1], tunnel_map_id[3]), + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + } + ) + + expected_attributes = { + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id, + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip, + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id, + } + + check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes) + + self.tunnel_map_ids.update(tunnel_map_id) + self.tunnel_ids.add(tunnel_id) + self.tunnel_term_ids.add(tunnel_term_id) + self.tunnel_map_map[tunnel_name] = tunnel_map_id + self.tunnel[tunnel_name] = tunnel_id + + def check_del_vxlan_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) + self.tunnel_ids.remove(old_tunnel[0]) + + old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + for old_tunnel_map in old_tunnel_maps: + check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) + self.tunnel_map_ids.remove(old_tunnel_map) + + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + 
time.sleep(2) + + if (self.tunnel_map_map.get(tunnel_name) is None): + tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + else: + tunnel_map_id = self.tunnel_map_map[tunnel_name] + + tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) + + # check that the vxlan tunnel termination are there + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" + + check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'), + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'), + } + ) + + self.tunnel_map_entry_ids.update(tunnel_map_entry_id) + + def check_vnet_entry(self, dvs, name, peer_list=[]): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + #Assert if there are linklocal entries + tbl = swsscommon.Table(app_db, "VNET_ROUTE_TUNNEL_TABLE") + route_entries = tbl.getKeys() + assert "ff00::/8" not in route_entries + assert "fe80::/64" not in route_entries + + #Check virtual router objects + assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 1),\ + "The VR objects are not created" + + new_vr_ids = 
get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 1) + + self.vnet_vr_ids.update(new_vr_ids) + self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[0], 'peer':peer_list } + + def check_default_vnet_entry(self, dvs, name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + #Check virtual router objects + assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids)),\ + "Some VR objects are created" + #Mappers for default VNET is created with default VR objects. + self.vr_map[name] = { 'ing':list(self.vnet_vr_ids)[0], 'egr':list(self.vnet_vr_ids)[0], 'peer':[] } + + def check_del_vnet_entry(self, dvs, name): + # TODO: Implement for VRF VNET + return True + + def vnet_route_ids(self, dvs, name, local=False): + vr_set = set() + + vr_set.add(self.vr_map[name].get('ing')) + + try: + for peer in self.vr_map[name].get('peer'): + vr_set.add(self.vr_map[peer].get('ing')) + except IndexError: + pass + + return vr_set + + def check_router_interface(self, dvs, intf_name, name, vlan_oid=0): + # Check RIF in ingress VRF + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global switch_mac + + expected_attr = { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'), + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac, + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + } + + if vlan_oid: + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'}) + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid}) + else: + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'}) + + new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs) + check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr) + + #IP2ME route will be created with every router interface + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, 1) + + if vlan_oid: + expected_attr = 
{ 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } + check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + + expected_attr = { 'SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } + check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + + check_linux_intf_arp_proxy(dvs, intf_name) + + self.rifs.add(new_rif) + self.routes.update(new_route) + + def check_del_router_interface(self, dvs, name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_rif = get_deleted_entries(asic_db, self.ASIC_RIF_TABLE, self.rifs, 1) + check_deleted_object(asic_db, self.ASIC_RIF_TABLE, old_rif[0]) + + self.rifs.remove(old_rif[0]) + + def check_vnet_local_routes(self, dvs, name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vnet_route_ids(dvs, name, True) + count = len(vr_ids) + + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Routes are not replicated to egress VRF, return if count is 0, else check peering + if not count: + return + + asic_vrs = set() + for idx in range(count): + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + def check_del_vnet_local_routes(self, dvs, name): + # TODO: Implement for VRF VNET + return True + + def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0, route_ids=""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vnet_route_ids(dvs, name) + count = len(vr_ids) + + # Check routes in ingress VRF + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + + if vni: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) + + if mac: + 
expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) + + if endpoint in self.nh_ids: + new_nh = self.nh_ids[endpoint] + else: + new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops) + self.nh_ids[endpoint] = new_nh + self.nhops.add(new_nh) + + check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr) + if not route_ids: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + else: + new_route = route_ids + + #Check if the route is in expected VRF + asic_vrs = set() + for idx in range(count): + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh, + } + ) + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + return new_route + + def serialize_endpoint_group(self, endpoints): + endpoints.sort() + return ",".join(endpoints) + + def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): + expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + entries = set(tbl_nhgm.getKeys()) + endpoints = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + endpoints.append(endpoint) + assert endpoint in expected_attrs + if ordered_ecmp == "true": + assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + 
del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) + + assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str + + def get_nexthop_groups(self, dvs, nhg): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + nhg_data = {} + nhg_data['id'] = nhg + entries = set(tbl_nhgm.getKeys()) + nhg_data['endpoints'] = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + nhg_data['endpoints'].append(endpoint) + return nhg_data + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) + + vr_ids = self.vnet_route_ids(dvs, name) + count = len(vr_ids) + + expected_attrs = {} + for idx, endpoint in enumerate(endpoints): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + if ordered_ecmp == "true" and nh_seq_id: + 
expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) + expected_attrs[endpoint] = expected_attr + + if nhg: + new_nhg = nhg + elif endpoint_str in self.nhg_ids: + new_nhg = self.nhg_ids[endpoint_str] + else: + new_nhg = get_created_entry(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + self.nhg_ids[endpoint_str] = new_nhg + self.nhgs.add(new_nhg) + + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", + } + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + asic_vrs = set() + for idx in range(count): + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + return new_route, new_nhg + + def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) + new_nhgs = [] + expected_attrs_primary = {} + for idx, endpoint in enumerate(endpoints_primary): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and 
mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + expected_attrs_primary[endpoint] = expected_attr + + if len(endpoints_primary) == 1: + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + return new_route + else : + new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + found_match = False + + for nhg in new_nhgs: + nhg_data = self.get_nexthop_groups(dvs, nhg) + eplist = self.serialize_endpoint_group(nhg_data['endpoints']) + if eplist == self.serialize_endpoint_group(endpoints_primary): + self.nhg_ids[endpoint_str_primary] = nhg + found_match = True + + assert found_match, "the expected Nexthop group was not found." + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + for nhg in new_nhgs: + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + active_nhg = self.nhg_ids[endpoint_str_primary] + for idx in range(count): + if prefix != "" and prefix not in new_route[idx] : + continue + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + + + self.routes.update(new_route) + del self.nhg_ids[endpoint_str_primary] + return new_route + + def check_del_vnet_routes(self, dvs, name, prefixes=[], absent=False): + # TODO: Implement for VRF VNET + + def _access_function(): + route_entries = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + route_prefixes = [json.loads(route_entry)["dest"] 
for route_entry in route_entries] + return (all(prefix not in route_prefixes for prefix in prefixes), None) + + if absent: + return True if _access_function()== None else False + elif prefixes: + wait_for_result(_access_function) + + return True + + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) diff --git a/tlm_teamd/values_store.cpp b/tlm_teamd/values_store.cpp index a955d36be7..957194b4c5 100644 --- a/tlm_teamd/values_store.cpp +++ b/tlm_teamd/values_store.cpp @@ -281,6 +281,14 @@ void ValuesStore::remove_keys_db(const std::vector & keys) const auto & p = split_key(key); const auto & table_name = p.first; const auto & table_key = p.second; + // Do not delete the key from State Db for table LAG_TABLE. LAG_TABLE entry is created/deleted + // from teamsyncd on detecting netlink of teamd dev as up/down. For some reason + // if we do not get state dump from teamdctl it might be transient issue. If it is + // persistent issue then teamsyncd might be able to catch it and delete state db entry + // or we can keep entry in its current state as best effort. Similar to try_add_lag which is best effort + // to connect to teamdctl and if it fails we do not delete State Db entry. 
+ if (table_name == "LAG_TABLE") + continue; swss::Table table(m_db, table_name); table.del(table_key); } @@ -366,13 +374,7 @@ void ValuesStore::update(const std::vector & dumps) { const auto & storage = from_json(dumps); const auto & old_keys = get_old_keys(storage); - // Do not delete te key from State Db. State DB LAB_TABLE entry is created/deleted - // from teamsyncd on detecting netlink of teamd dev as up/down. For some reason - // if we do not get state dump from teamdctl it might be transient issue. If it is - // persistent issue then teamsyncd might be able to catch it and delete state db entry - // or we can keep entry in it's current state as best effort. Similar to try_add_lag which is best effort - // to connect to teamdctl and if it fails we do not delete State Db entry. - //remove_keys_db(old_keys); + remove_keys_db(old_keys); remove_keys_storage(old_keys); const auto & keys_to_refresh = update_storage(storage); update_db(storage, keys_to_refresh);