From 9c2478bec4a3a42b3fe05e596e017077c53c1220 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 10 May 2022 15:28:30 +0000 Subject: [PATCH 01/66] First cut of few events' definitons --- .../yang-events/sonic-events-bgp.yang | 62 +++++++ .../yang-events/sonic-events-common.yang | 46 +++++ .../yang-events/sonic-events-dhcp_relay.yang | 67 +++++++ .../yang-events/sonic-events-host.yang | 168 ++++++++++++++++++ .../yang-events/sonic-events-pmon.yang | 36 ++++ .../yang-events/sonic-events-swss.yang | 112 ++++++++++++ .../yang-events/sonic-events-syncd.yang | 54 ++++++ 7 files changed, 545 insertions(+) create mode 100644 src/sonic-yang-models/yang-events/sonic-events-bgp.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-common.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-host.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-pmon.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-swss.yang create mode 100644 src/sonic-yang-models/yang-events/sonic-events-syncd.yang diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang new file mode 100644 index 000000000000..1512840fd3c6 --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -0,0 +1,62 @@ +module sonic-events-bgp { + namespace "http://github.com/Azure/sonic-events-bgp"; + + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + + revision 2022-12-01 { + description "BGP alert events."; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC BGP events"; + + container bgp-state { + description " + Declares an event for BGP state for a neighbor IP + The status says up or down"; + + leaf ip { + type inet:ip-address; + description "IP of neighbor"; + } + + leaf status { + type enumeration { + enum "up"; + enum "down"; + } + description "Provides the status as up (true) or down (false)"; + } + + uses evtcmn:sonic-events-cmn; + } + + container bgp-hold-timer { + description " + Declares an event for BGP hold timer expiry. + This event does not have any other parameter. + Hence source + tag identifies an event"; + + uses evtcmn:sonic-events-cmn; + } + + container zebra-no-buff { + description " + Declares an event for zebra running out of buffer. + This event does not have any other parameter. 
+ Hence source + tag identifies an event"; + + uses evtcmn:sonic-events-cmn; + } +} + diff --git a/src/sonic-yang-models/yang-events/sonic-events-common.yang b/src/sonic-yang-models/yang-events/sonic-events-common.yang new file mode 100644 index 000000000000..bbff8f87796e --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-common.yang @@ -0,0 +1,46 @@ +module sonic-events-common { + namespace "http://github.com/Azure/sonic-events-common"; + prefix evtcmn; + yang-version 1.1; + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC Events common definition"; + + revision 2022-12-01 { + description + "Common reusable definitions"; + } + + grouping sonic-events-cmn { + leaf timestamp { + type yang::date-and-time; + description "time of the event"; + } + } + + grouping sonic-events-usage { + leaf usage { + type uint8 { + range "0..100" { + error-message "Incorrect val for %"; + } + } + description "Percentage in use"; + } + + leaf limit { + type uint8 { + range "0..100" { + error-message "Incorrect val for %"; + } + } + description "Percentage limit set"; + } + } +} diff --git a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang new file mode 100644 index 000000000000..b9e67c1fceeb --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang @@ -0,0 +1,67 @@ +module sonic-events-dhcp-relay { + namespace "http://github.com/sonic-net/sonic-events-dhcp-relay"; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + + revision 2022-12-01 { + description "dhcp-relay alert events."; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC dhcp-relay events"; + + container dhcp-relay-discard { + description " + Declares an event for dhcp-relay discarding packet on an + interface due to missing IP address assigned. + Params: + name of the interface discarding. + class of the missing IP address as IPv4 or IPv6."; + + leaf ip_class { + type enumeration { + enum "ipV4"; + enum "ipV6"; + } + description "Class of IP address missing"; + } + + leaf ifname { + type string; + description "Name of the i/f discarding"; + } + + uses evtcmn:sonic-events-cmn; + } + + container dhcp-relay-disparity { + description " + Declares an event for disparity detected in + DHCP Relay behavior by dhcpmon. 
+ parameters: + vlan that shows this disparity + The duration of disparity"; + + leaf vlan { + type string; + description "Name of the vlan affected"; + } + + leaf duration { + type uint32; + description "Duration of disparity"; + } + + uses evtcmn:sonic-events-cmn; + } +} + diff --git a/src/sonic-yang-models/yang-events/sonic-events-host.yang b/src/sonic-yang-models/yang-events/sonic-events-host.yang new file mode 100644 index 000000000000..d9c20b9ae358 --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-host.yang @@ -0,0 +1,168 @@ +module events-host { + namespace "http://github.com/sonic-net/sonic-events-host"; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + revision 2022-12-01 { + description "BGP alert events."; + } + + container disk-usage { + description " + Declares an event for disk usage crossing set limit + The parameters describe the usage & limit set."; + + leaf fs { + type string; + description "Name of the file system"; + default ""; + } + + uses evtcmn:sonic-events-usage; + + uses evtcmn:sonic-events-cmn; + } + + container memory-usage { + description " + Declares an event for memory usage crossing set limit + The parameters describe the usage & limit set."; + + uses evtcmn:sonic-events-usage; + + uses evtcmn:sonic-events-cmn; + } + + container event-sshd { + description " + Declares an event reported by sshd. + The fail type declares the type of failure. + INCORRECT_PASSWORD - denotes that sshd is sending + wrong password to AAA to intentionally fail this login."; + + leaf fail_type { + type enumeration { + enum "INCORRECT_PASSWD"; + } + description "Type of failure"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-disk { + description " + Declares an event reported by disk check. + The fail type declares the type of failure. + read-only - denotes that disk is in RO state."; + + leaf fail_type { + type enumeration { + enum "read_only"; + } + description "Type of failure"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-kernel { + description " + Declares an event reported by kernel. + The fail type declares the type of failure."; + + leaf fail_type { + type enumeration { + enum "write_failed"; + enum "write_protected"; + enum "remount_read_only"; + enum "aufs_read_lock"; + enum "invalid_freelist"; + enum "zlib_decompress"; + } + description "Type of failure"; + } + + leaf msg { + type string; + description "human readable hint text"; + default ""; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-monit-proc { + description " + Declares an event reported by monit for a process + that is not running. + + Params: + Name of the process that is not running. + The ASIC-index of that process."; + + leaf proc_name { + type string; + description "Name of the process not running"; + default ""; + } + + leaf asic_index { + type uint8; + description "ASIC index in case of multi asic platform"; + default 0; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-monit-status { + description " + Declares an event reported by monit for status check + failure for a process + + Params: + Name of the process that is not running. 
+ The ASIC-index of that process."; + + leaf entity { + type string; + description "Name of the failing entity"; + default ""; + } + + leaf asic_index { + type uint8; + description "ASIC index in case of multi asic platform"; + default 0; + } + + leaf reason { + type string; + description "Human readble text explaining failure"; + default ""; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-platform { + description " + Declares an event for platform related failure. + Params: + fail_type provides the type of failure."; + + leaf fail_type { + type enumeration { + enum "watchdog_timeout"; + enum "switch_parity_error"; + enum "SEU_error"; + } + description "Type of failure"; + } + + uses evtcmn:sonic-events-cmn; + } +} diff --git a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang new file mode 100644 index 000000000000..865ed2a65f8a --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang @@ -0,0 +1,36 @@ +module sonic-events-pmon { + namespace "http://github.com/sonic-net/sonic-events-pmon"; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + + revision 2022-12-01 { + description "pmon alert events."; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC pmon events"; + + + container pmon-exited { + description " + Declares an event reportes by pmon for an unexpected exit. + The exited entity is the only param"; + + leaf entity { + type string; + description "entity that had unexpected exit"; + } + + uses evtcmn:sonic-events-cmn; + } +} + diff --git a/src/sonic-yang-models/yang-events/sonic-events-swss.yang b/src/sonic-yang-models/yang-events/sonic-events-swss.yang new file mode 100644 index 000000000000..1afa6947188d --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-swss.yang @@ -0,0 +1,112 @@ +module sonic-events-swss { + namespace "http://github.com/sonic-net/sonic-events-swss"; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + + revision 2022-12-01 { + description "SWSS alert events."; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC SWSS events"; + + container redis-generic { + description " + Declares an event for a fatal error encountered by swss. + The asic-index of the failing process is the only param."; + + leaf asic_index { + type uint8; + description "ASIC index in case of multi asic platform"; + default 0; + } + + uses evtcmn:sonic-events-cmn; + } + + container if-state { + description " + Declares an event for i/f flap. + + The name of the flapping i/f and status are the only params."; + + leaf ifname { + type string; + description "Interface name"; + } + + leaf status { + type enumeration { + enum "up"; + enum "down"; + } + description "Provides the status as up (true) or down (false)"; + } + + uses evtcmn:sonic-events-cmn; + + } + + container pfc-storm { + description " + Declares an event for PFC storm. 
+ + The name of the i/f facing the storm is the only param."; + + leaf ifname { + type string; + description "Interface name"; + } + + leaf queue_index { + type uint8; + } + + leaf queue_id { + type uint64_t; + } + + leaf port_id { + type uint64_t; + } + + leaf timestamp { + type yang::date-and-time; + description "time of the event"; + } + } + + container chk_crm_threshold { + description " + Declares an event for CRM threshold."; + + leaf percent { + type uint8 { + range "0..100" { + error-message "Invalid percentage value"; + } + } + description "percentage used"; + } + + leaf used_cnt { + type uint8; + } + + leaf free_cnt { + type uint64_t; + } + + uses evtcmn:sonic-events-cmn; + } +} + diff --git a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang new file mode 100644 index 000000000000..bf7e332c62f5 --- /dev/null +++ b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang @@ -0,0 +1,54 @@ +module sonic-events-syncd { + namespace "http://github.com/sonic-net/sonic-events-syncd"; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + } + + revision 2022-12-01 { + description "syncd alert events."; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC syncd events"; + + container syncd_failure { + description " + Declares an event for all types of syncd failure. + The type of failure and the asic-index of failing syncd are + provided along with a human readable message to give the + dev debugging additional info."; + + leaf asic_index { + type uint8; + description "ASIC index in case of multi asic platform"; + default 0; + } + + leaf fail_type { + type enumeration { + enum "route_add_failed"; + enum "switch_event_2"; + enum "brcm_sai_switch_assert"; + enum "assert"; + enum "mmu_err"; + } + } + + leaf msg { + type string; + description "human readable hint text" + default ""; + } + + uses evtcmn:sonic-events-cmn; + } +} + From 4bba076cfb9e9db4b8d0fd75ebc4f6f400a5c9f5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 18 May 2022 17:15:03 +0000 Subject: [PATCH 02/66] first cut --- src/sonic-eventd/Makefile | 42 +++++ src/sonic-eventd/eventd/eventd.cpp | 249 +++++++++++++++++++++++++++++ src/sonic-eventd/eventd/eventd.h | 104 ++++++++++++ 3 files changed, 395 insertions(+) create mode 100644 src/sonic-eventd/Makefile create mode 100644 src/sonic-eventd/eventd/eventd.cpp create mode 100644 src/sonic-eventd/eventd/eventd.h diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile new file mode 100644 index 000000000000..07c0ef41dca0 --- /dev/null +++ b/src/sonic-eventd/Makefile @@ -0,0 +1,42 @@ +RM := rm -rf +EVENTD_TARGET := eventd +CP := cp +MKDIR := mkdir +CC := g++ +MV := mv +LIBS := -levent -lswsscommon -pthread -lzmq +CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common +PWD := $(shell pwd) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(strip $(C_DEPS)),) +-include $(C_DEPS) $(OBJS) +endif +endif + +-include src/subdir.mk + +all: sonic-eventd + +sonic-eventd: $(OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TARGET) $(OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +install: + $(MKDIR) -p $(DESTDIR)/usr/sbin + $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + +deinstall: + $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) + $(RM) -rf $(DESTDIR)/usr/sbin + +clean: + -$(RM) $(EXECUTABLES) $(C_DEPS) $(OBJS) $(EVENTD_TARGET) + -@echo ' ' + +.PHONY: all clean dependents + + diff --git 
a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp new file mode 100644 index 000000000000..18e47ed403d5 --- /dev/null +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -0,0 +1,249 @@ +#include +#include "eventd.h" + +/* + * There are 3 threads, including the main + * + * main thread -- Runs eventd service that accepts commands event_req_type_t + * This can be used to control caching events and a no-op echo service. + * + * capture/cache service + * Saves all the events between cache start & stop + * + * Main proxy service that runs XSUB/XPUB ends + */ + +#define READ_SET_SIZE 100 + + +eventd_server::eventd_server() : m_capture(NULL) +{ + m_ctx = zmq_ctx_new(); + RET_ON_ERR(m_ctx != NULL, "Failed to get zmq ctx"); +out: + return; +} + + +eventd_server::~eventd_server() +{ + close(); +} + + +int +eventd_server::zproxy_service() +{ + int ret = -1; + SWSS_LOG_INFO("Start xpub/xsub proxy"); + + void *frontend = zmq_socket(m_ctx, ZMQ_XSUB); + RET_ON_ERR(frontend != NULL, "failing to get ZMQ_XSUB socket"); + + int rc = zmq_bind(frontend, get_config(XSUB_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind XSUB to %s", get_config(XSUB_END_KEY)); + + void *backend = zmq_socket(m_ctx, ZMQ_XPUB); + RET_ON_ERR(backend != NULL, "failing to get ZMQ_XPUB socket"); + + rc = zmq_bind(backend, get_config(XPUB_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(XPUB_END_KEY)); + + void *capture = zmq_socket(m_ctx, ZMQ_PUB); + RET_ON_ERR(capture != NULL, "failing to get ZMQ_XSUB socket"); + + rc = zmq_bind(capture, get_config(CAPTURE_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind PAIR to %s", get_config(PAIR_END_KEY)); + + m_thread_proxy = thread(&eventd_server::zproxy_service_run, this, frontend, + backend, capture); + ret = 0; +out: + return ret; +} + + +void +eventd_server::zproxy_service_run(void *frontend, void *backend, void *capture) +{ + SWSS_LOG_INFO("Running xpub/xsub proxy"); + + /* runs forever until zmq context is terminated */ + zmq_proxy(frontend, backend, capture); + + zmq_close(frontend); + zmq_close(backend); + zmq_close(capture); + + SWSS_LOG_ERR("Terminating xpub/xsub proxy"); + + return 0; +} + + +int +eventd_server::capture_events() +{ + /* clean any pre-existing cache */ + int ret = -1; + + vector().swap(m_events); + map.swap(m_last_events); + + RET_ON_ERR(m_capture != NULL, "capture sock is not initialized yet"); + + while(true) { + zmq_msg_t msg; + internal_event_t event; + int more = 0; + size_t more_size = sizeof (more); + + { + zmq_msg_t pat; + zmq_msg_init(&pat); + RET_ON_ERR(zmq_msg_recv(&pat, m_capture, 0) != -1, + "Failed to capture pattern"); + zmq_msg_close(&pat); + } + + RET_ON_ERR(zmq_getsockopt (m_capture, ZMQ_RCVMORE, &more, &more_size) == 0, + "Failed to get sockopt for capture sock"); + RET_ON_ERR(more, "Event data expected, but more is false"); + + zmq_msg_init(&msg); + RET_ON_ERR(zmq_msg_recv(&msg, m_capture, 0) != -1, + "Failed to read event data"); + + string s((const char *)zmq_msg_data(&msg), zmq_msg_size(&msg)); + zmq_msg_close(&msg); + + deserialize(s, event); + + m_last_events[event[EVENT_RUNTIME_ID]] = s; + + try + { + m_events.push_back(s); + } + catch (exception& e) + { + stringstream ss; + ss << e.what(); + SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", + ss.str().c_str(), m_events.size()); + goto out; + } + } +out: + /* Destroy the service and exit the thread */ + close(); + return 0; +} + + +int +eventd_server::eventd_service() +{ + event_service service; + + RET_ON_ERR(zproxy_service() == 0, 
"Failed to start zproxy_service"); + + RET_ON_ERR(service.init_server(m_ctx) == 0, "Failed to init service"); + + while(true) { + int code, resp = -1; + vector req_data, resp_data; + + RET_ON_ERR(channel_read(code, data) == 0, + "Failed to read request"); + + switch(code) { + case EVENT_CACHE_START: + if (m_capture != NULL) { + resp_code = 1; + break; + } + m_capture = zmq_socket(m_ctx, ZMQ_SUB); + RET_ON_ERR(capture != NULL, "failing to get ZMQ_XSUB socket"); + + rc = zmq_connect(capture, get_config(CAPTURE_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind PAIR to %s", get_config(PAIR_END_KEY)); + + rc = zmq_setsockopt(sub_read, ZMQ_SUBSCRIBE, "", 0); + RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); + + /* Kick off the service */ + m_thread_capture = thread(&eventd_server::capture_events, this); + + resp_code = 0; + break; + + + case EVENT_CACHE_STOP: + resp_code = 0; + if (m_capture != NULL) { + close(m_capture); + m_capture = NULL; + + /* Wait for thread to end */ + m_thread_capture.join(); + } + break; + + + case EVENT_CACHE_READ: + resp_code = 0; + + if (m_events.empty()) { + for (last_events_t::iterator it = m_last_events.begin(); + it != m_last_events.end(); ++it) { + m_events.push_back(it->second); + } + last_events_t().swap(m_last_events); + } + + int sz = m_events.size() < READ_SET_SIZE ? m_events.size() : READ_SET_SIZE; + + auto it = std::next(m_events.begin(), sz); + move(m_events.begin(), m_events.end(), back_inserter(resp_data)); + + if (sz == m_events.size()) { + events_data_lst_t().swap(m_events); + } else { + m_events.erase(m_events.begin(), it); + } + break; + + + case EVENT_ECHO: + resp_code = 0; + resp_data.swap(req_data); + + default: + SWSS_LOG_ERROR("Unexpected request: %d", code); + assert(false); + break; + } + RET_ON_ERR(channel_write(resp_code, resp_data) == 0, + "Failed to write response back"); + } +out: + /* Breaks here on fatal failure */ + if (m_capture != NULL) { + close(m_capture); + m_capture = NULL; + } + close(); + m_thread_proxy.join(); + m_thread_capture.join(); + return 0; +} + + + +void eventd_server::close() +{ + zmq_ctx_term(m_ctx); m_ctx = NULL; + +} + diff --git a/src/sonic-eventd/eventd/eventd.h b/src/sonic-eventd/eventd/eventd.h new file mode 100644 index 000000000000..3a7d480ca1dd --- /dev/null +++ b/src/sonic-eventd/eventd/eventd.h @@ -0,0 +1,104 @@ +/* + * Header file for eventd daemon + */ +#include "events_service.h" + +typedef map last_events_t; + +class eventd_server { + public: + /* Creates the zmq context */ + eventd_server(); + + ~eventd_server(); + + /* + * Main eventd service loop that honors event_req_type_t + * + * For echo, it just echoes + * + * FOr cache start, create the SUB end of capture and kick off + * capture_events thread. Upon cache stop command, close the handle + * which will stop the caching thread with read failure. + * + * for cache read, returns the collected events as subset of + * strings. + * + */ + int eventd_service(); + + + /* + * For any fatal failure, terminate the entire run across threads + * by deleting the zmq context. + */ + void close(); + + private: + /* + * Started by eventd_service. + * Creates XPUB & XSUB end points. + * Bind the same + * Create a PUB socket end point for capture and bind. + * Call run_proxy method with sockets in a dedicated thread. + * Thread runs forever until the zmq context is terminated. 
+ */ + int zproxy_service(); + int zproxy_service_run(void *front, void *back, void *capture); + + + /* + * Capture/Cache service + * + * The service started in a dedicted thread upon demand. + * It expects SUB end of capture created & connected to the PUB + * capture end in zproxy service. + * + * It goes in a forever loop, until the zmq read fails, which will happen + * if the capture/SUB end is closed. The stop cache service will close it, + * while start cache service creates & connects. + * + * Hence this thread/function is active between cache start & stop. + * + * Each event is 2 parts. It drops the first part, which is + * more for filtering events. It creates string from second part + * and saves it. + * + * The string is the serialized version of internal_event_ref + * + * It keeps two sets of data + * 1) List of all events received in vector in same order as received + * 2) Map of last event from each runtime id + * + * We add to the vector as much as allowed by vector and as well + * the available memory. When mem exhausts, just keep updating map + * with last event from that sender. + * + * The sequence number in map will help assess the missed count. + * + * Thread is started upon creating SUB end of capture socket. + */ + int capture_events(); + + + private: + void *m_ctx; + + events_data_lst_t m_events; + + last_events_t m_last_events; + + void *m_capture; + + + thread m_thread_proxy; + thread m_thread_capture; +}; + + + + + + + + From ff783438e13b369c2018e652bec2c757eb78bef7 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 19 May 2022 23:51:58 +0000 Subject: [PATCH 03/66] First run code complete --- src/sonic-eventd/eventd/eventd.cpp | 155 +++++++++++++++++++++-------- src/sonic-eventd/eventd/eventd.h | 10 +- 2 files changed, 119 insertions(+), 46 deletions(-) diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp index 18e47ed403d5..4445aae6fc15 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -13,13 +13,28 @@ * Main proxy service that runs XSUB/XPUB ends */ +#define MB(N) ((N) * 1024 * 1024) +#define EVT_SIZE_AVG 150 + +#define MAX_CACHE_SIZE (MB(100) / (EVT_SIZE_AVG)) + +/* Count of elements returned in each read */ #define READ_SET_SIZE 100 +/* + * Max count of possible concurrent event publishers + * A rough estimate only, more for mem reserve. + * So this does not limit any usage + */ +#define MAX_PUBLISHERS_COUNT 1000 + -eventd_server::eventd_server() : m_capture(NULL) +eventd_server::eventd_server() : m_capture(NULL), { m_ctx = zmq_ctx_new(); RET_ON_ERR(m_ctx != NULL, "Failed to get zmq ctx"); + m_cache_max = get_config_data(CACHE_MAX_CNT, (uint32_t)MAX_CACHE_SIZE); + out: return; } @@ -82,48 +97,78 @@ eventd_server::zproxy_service_run(void *frontend, void *backend, void *capture) int -eventd_server::capture_events() +eventd_server::capture_events(events_data_lst_t &lst) { /* clean any pre-existing cache */ int ret = -1; + int i; + + events_data_lst_t.swap(m_events); + last_events_t.swap(m_last_events); + + /* + * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only + * upon m_events/vector overflow, which might block adding new entries in map + * if overall mem consumption is too high. Clearing the map just before use + * is likely to help. 
+ */ + for (i=0; i().swap(m_events); - map.swap(m_last_events); + /* Cache last + typedef map pre_exist_id_t; + pre_exist_id_t pre_exist_id; RET_ON_ERR(m_capture != NULL, "capture sock is not initialized yet"); - while(true) { - zmq_msg_t msg; - internal_event_t event; - int more = 0; - size_t more_size = sizeof (more); + if (!lst.empty()) { + for (events_data_lst_t::it = lst.begin(); it != lst.end(); ++it) { + internal_event_t event; - { - zmq_msg_t pat; - zmq_msg_init(&pat); - RET_ON_ERR(zmq_msg_recv(&pat, m_capture, 0) != -1, - "Failed to capture pattern"); - zmq_msg_close(&pat); + deserialize(*itc, event); + pre_exist_id[event[EVENT_RUNTIME_ID]] = events_base::str_to_seq(event[EVENT_SEQUENCE]); } + m_events.swap(lst); + } - RET_ON_ERR(zmq_getsockopt (m_capture, ZMQ_RCVMORE, &more, &more_size) == 0, - "Failed to get sockopt for capture sock"); - RET_ON_ERR(more, "Event data expected, but more is false"); + if (!pre_exist_id.empty()) { + /* Check read events against provided cache for 2 seconds to skip */ + chrono::steady_clock::timepoint start = chrono::steady_clock::now(); + while(!pre_exist_id.empty()) { + internal_event_t event; + string source, evt_str; - zmq_msg_init(&msg); - RET_ON_ERR(zmq_msg_recv(&msg, m_capture, 0) != -1, - "Failed to read event data"); + RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, + "Failed to read from capture socket"); - string s((const char *)zmq_msg_data(&msg), zmq_msg_size(&msg)); - zmq_msg_close(&msg); + deserialize(evt_str, event); - deserialize(s, event); + pre_exist_id_t::iterator it = pre_exist_id.find(event[EVENT_RUNTIME_ID]); + if (it != pre_exist_id.end()) { + seq = events_base::str_to_seq(event[EVENT_SEQUENCE]); + if (seq > it->second) { + m_events.push_back(evt_str); + } + if (seq >= it->second) { + pre_exist_id.erase(it); + } + } + if(chrono::steady_clock::now() - start > chrono::seconds(2)) + break; + } + pre_exist_id_t().swap(pre_exist_id); + } - m_last_events[event[EVENT_RUNTIME_ID]] = s; + /* Save until max allowed */ + while(m_events.size() < m_cache_max) { + string source, evt_str; + RET_ON_ERR(zmq_message_read(m_socket, ZMQ_DONTWAIT, source, evt_str) == 0, + "Failed to read from capture socket"); try { - m_events.push_back(s); + m_events.push_back(evt_str); } catch (exception& e) { @@ -131,12 +176,29 @@ eventd_server::capture_events() ss << e.what(); SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", ss.str().c_str(), m_events.size()); - goto out; + break; } } + + + /* Save only last event per sender */ + m_last_events.clear(); + while(true) { + internal_event_t event; + string source, evt_str; + + RET_ON_ERR(zmq_message_read(m_socket, ZMQ_DONTWAIT, source, evt_str) == 0, + "Failed to read from capture socket"); + + deserialize(evt_str, event); + + m_last_events[event[EVENT_RUNTIME_ID]] = evt_str; + } out: - /* Destroy the service and exit the thread */ - close(); + /* + * Capture stop will close the socket which fail the read + * and hence bail out. 
+ */ return 0; } @@ -152,13 +214,14 @@ eventd_server::eventd_service() while(true) { int code, resp = -1; - vector req_data, resp_data; + events_data_lst_t req_data, resp_data; RET_ON_ERR(channel_read(code, data) == 0, "Failed to read request"); switch(code) { - case EVENT_CACHE_START: + case EVENT_CACHE_INIT: + /* connect only*/ if (m_capture != NULL) { resp_code = 1; break; @@ -172,21 +235,33 @@ eventd_server::eventd_service() rc = zmq_setsockopt(sub_read, ZMQ_SUBSCRIBE, "", 0); RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); + resp_code = 0; + break; + + + case EVENT_CACHE_START: + if (m_capture == NULL) { + resp_code = -1; + break; + } /* Kick off the service */ - m_thread_capture = thread(&eventd_server::capture_events, this); + m_thread_capture = thread(&eventd_server::capture_events, this, req_data); resp_code = 0; break; case EVENT_CACHE_STOP: - resp_code = 0; if (m_capture != NULL) { close(m_capture); m_capture = NULL; /* Wait for thread to end */ m_thread_capture.join(); + resp_code = 0; + } + else { + resp_code = -1; } break; @@ -204,13 +279,15 @@ eventd_server::eventd_service() int sz = m_events.size() < READ_SET_SIZE ? m_events.size() : READ_SET_SIZE; - auto it = std::next(m_events.begin(), sz); - move(m_events.begin(), m_events.end(), back_inserter(resp_data)); + if (sz != 0) { + auto it = std::next(m_events.begin(), sz); + move(m_events.begin(), m_events.end(), back_inserter(resp_data)); - if (sz == m_events.size()) { - events_data_lst_t().swap(m_events); - } else { - m_events.erase(m_events.begin(), it); + if (sz == m_events.size()) { + events_data_lst_t().swap(m_events); + } else { + m_events.erase(m_events.begin(), it); + } } break; diff --git a/src/sonic-eventd/eventd/eventd.h b/src/sonic-eventd/eventd/eventd.h index 3a7d480ca1dd..c110d7d01c85 100644 --- a/src/sonic-eventd/eventd/eventd.h +++ b/src/sonic-eventd/eventd/eventd.h @@ -78,10 +78,12 @@ class eventd_server { * * Thread is started upon creating SUB end of capture socket. 
*/ - int capture_events(); + int capture_events(events_data_lst_t &); private: + uint32_t m_cache_max; + void *m_ctx; events_data_lst_t m_events; @@ -96,9 +98,3 @@ class eventd_server { }; - - - - - - From 52a8a145162a35c29c7908fdc89094fd71f8fc0e Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 20 May 2022 18:04:34 +0000 Subject: [PATCH 04/66] upon self code read --- src/sonic-eventd/eventd/eventd.cpp | 27 ++++++++++++++++----------- src/sonic-eventd/eventd/eventd.h | 15 +++++++-------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp index 4445aae6fc15..8fe21ff4df6f 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -29,7 +29,7 @@ #define MAX_PUBLISHERS_COUNT 1000 -eventd_server::eventd_server() : m_capture(NULL), +eventd_server::eventd_server() : m_capture(NULL) { m_ctx = zmq_ctx_new(); RET_ON_ERR(m_ctx != NULL, "Failed to get zmq ctx"); @@ -65,10 +65,10 @@ eventd_server::zproxy_service() RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(XPUB_END_KEY)); void *capture = zmq_socket(m_ctx, ZMQ_PUB); - RET_ON_ERR(capture != NULL, "failing to get ZMQ_XSUB socket"); + RET_ON_ERR(capture != NULL, "failing to get ZMQ_PUB socket for capture"); rc = zmq_bind(capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind PAIR to %s", get_config(PAIR_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config((CAPTURE_END_KEY)); m_thread_proxy = thread(&eventd_server::zproxy_service_run, this, frontend, backend, capture); @@ -103,9 +103,6 @@ eventd_server::capture_events(events_data_lst_t &lst) int ret = -1; int i; - events_data_lst_t.swap(m_events); - last_events_t.swap(m_last_events); - /* * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only * upon m_events/vector overflow, which might block adding new entries in map @@ -164,7 +161,7 @@ eventd_server::capture_events(events_data_lst_t &lst) while(m_events.size() < m_cache_max) { string source, evt_str; - RET_ON_ERR(zmq_message_read(m_socket, ZMQ_DONTWAIT, source, evt_str) == 0, + RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, "Failed to read from capture socket"); try { @@ -182,12 +179,11 @@ eventd_server::capture_events(events_data_lst_t &lst) /* Save only last event per sender */ - m_last_events.clear(); while(true) { internal_event_t event; string source, evt_str; - RET_ON_ERR(zmq_message_read(m_socket, ZMQ_DONTWAIT, source, evt_str) == 0, + RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, "Failed to read from capture socket"); deserialize(evt_str, event); @@ -227,10 +223,10 @@ eventd_server::eventd_service() break; } m_capture = zmq_socket(m_ctx, ZMQ_SUB); - RET_ON_ERR(capture != NULL, "failing to get ZMQ_XSUB socket"); + RET_ON_ERR(capture != NULL, "failing to get ZMQ_SUB socket"); rc = zmq_connect(capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind PAIR to %s", get_config(PAIR_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config((CAPTURE_END_KEY)); rc = zmq_setsockopt(sub_read, ZMQ_SUBSCRIBE, "", 0); RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); @@ -244,6 +240,9 @@ eventd_server::eventd_service() resp_code = -1; break; } + events_data_lst_t.swap(m_events); + last_events_t.swap(m_last_events); + /* Kick off the service */ m_thread_capture = thread(&eventd_server::capture_events, this, req_data); @@ -253,6 +252,12 @@ eventd_server::eventd_service() case 
EVENT_CACHE_STOP: if (m_capture != NULL) { + /* + * Caller would have initiated SUBS channel. + * Read for CACHE_DRAIN_IN_MILLISECS to drain off cache + * before stopping. + */ + this_thread::sleep_for(chrono::milliseconds(CACHE_DRAIN_IN_MILLISECS)); close(m_capture); m_capture = NULL; diff --git a/src/sonic-eventd/eventd/eventd.h b/src/sonic-eventd/eventd/eventd.h index c110d7d01c85..855bb0dba78c 100644 --- a/src/sonic-eventd/eventd/eventd.h +++ b/src/sonic-eventd/eventd/eventd.h @@ -3,7 +3,7 @@ */ #include "events_service.h" -typedef map last_events_t; +typedef map last_events_t; class eventd_server { public: @@ -68,17 +68,17 @@ class eventd_server { * * It keeps two sets of data * 1) List of all events received in vector in same order as received - * 2) Map of last event from each runtime id + * 2) Map of last event from each runtime id upon list overflow max size. * - * We add to the vector as much as allowed by vector and as well - * the available memory. When mem exhausts, just keep updating map - * with last event from that sender. + * We add to the vector as much as allowed by vector and max limit, + * whichever comes first. * - * The sequence number in map will help assess the missed count. + * The sequence number in internal event will help assess the missed count + * by the consumer of the cache data. * * Thread is started upon creating SUB end of capture socket. */ - int capture_events(events_data_lst_t &); + int capture_events(events_data_lst_t &lst); private: @@ -92,7 +92,6 @@ class eventd_server { void *m_capture; - thread m_thread_proxy; thread m_thread_capture; }; From 30c5108528fbd48f58a1a2b3626ff7face0a32f8 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 26 May 2022 21:03:06 +0000 Subject: [PATCH 05/66] update --- src/sonic-eventd/eventd/eventd.cpp | 44 ++++++++++++++++++------------ 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp index 8fe21ff4df6f..e79750736a5f 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -68,7 +68,7 @@ eventd_server::zproxy_service() RET_ON_ERR(capture != NULL, "failing to get ZMQ_PUB socket for capture"); rc = zmq_bind(capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config((CAPTURE_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config(CAPTURE_END_KEY)); m_thread_proxy = thread(&eventd_server::zproxy_service_run, this, frontend, backend, capture); @@ -123,8 +123,12 @@ eventd_server::capture_events(events_data_lst_t &lst) for (events_data_lst_t::it = lst.begin(); it != lst.end(); ++it) { internal_event_t event; - deserialize(*itc, event); - pre_exist_id[event[EVENT_RUNTIME_ID]] = events_base::str_to_seq(event[EVENT_SEQUENCE]); + if (deserialize(*itc, event) == 0) { + pre_exist_id[event[EVENT_RUNTIME_ID]] = events_base::str_to_seq(event[EVENT_SEQUENCE]); + } + else { + SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); + } } m_events.swap(lst); } @@ -139,18 +143,21 @@ eventd_server::capture_events(events_data_lst_t &lst) RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, "Failed to read from capture socket"); - deserialize(evt_str, event); - - pre_exist_id_t::iterator it = pre_exist_id.find(event[EVENT_RUNTIME_ID]); - if (it != pre_exist_id.end()) { - seq = events_base::str_to_seq(event[EVENT_SEQUENCE]); - if (seq > it->second) { - m_events.push_back(evt_str); - } - if (seq >= it->second) { - 
pre_exist_id.erase(it); + if (deserialize(evt_str, event) == 0) { + pre_exist_id_t::iterator it = pre_exist_id.find(event[EVENT_RUNTIME_ID]); + if (it != pre_exist_id.end()) { + seq = events_base::str_to_seq(event[EVENT_SEQUENCE]); + if (seq > it->second) { + m_events.push_back(evt_str); + } + if (seq >= it->second) { + pre_exist_id.erase(it); + } } } + else { + SWSS_LOG_ERROR("failed to serialize received event from publisher. DROP"); + } if(chrono::steady_clock::now() - start > chrono::seconds(2)) break; } @@ -186,9 +193,12 @@ eventd_server::capture_events(events_data_lst_t &lst) RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, "Failed to read from capture socket"); - deserialize(evt_str, event); - - m_last_events[event[EVENT_RUNTIME_ID]] = evt_str; + if (deserialize(evt_str, event) == 0) { + m_last_events[event[EVENT_RUNTIME_ID]] = evt_str; + } + else { + SWSS_LOG_ERROR("FAILED to serialize received event from publisher. DROP"); + } } out: /* @@ -226,7 +236,7 @@ eventd_server::eventd_service() RET_ON_ERR(capture != NULL, "failing to get ZMQ_SUB socket"); rc = zmq_connect(capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config((CAPTURE_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(CAPTURE_END_KEY)); rc = zmq_setsockopt(sub_read, ZMQ_SUBSCRIBE, "", 0); RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); From b95e5d93681e1ff9f64598e65c66373eee05307e Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 26 May 2022 21:06:13 +0000 Subject: [PATCH 06/66] Added severity --- .../yang-events/sonic-events-bgp.yang | 10 ++++++++++ .../yang-events/sonic-events-dhcp_relay.yang | 8 ++++++++ .../yang-events/sonic-events-host.yang | 20 +++++++++++++++++++ .../yang-events/sonic-events-pmon.yang | 6 ++++++ .../yang-events/sonic-events-swss.yang | 12 +++++++++++ .../yang-events/sonic-events-syncd.yang | 6 ++++++ 6 files changed, 62 insertions(+) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 1512840fd3c6..5b0508e366f2 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -7,6 +7,10 @@ module sonic-events-bgp { prefix evtcmn; } + import openconfig-alarm-types { + prefix oc-alarm-types; + } + revision 2022-12-01 { description "BGP alert events."; } @@ -21,6 +25,8 @@ module sonic-events-bgp { "SONIC BGP events"; container bgp-state { + oc-alarm-types:MAJOR + description " Declares an event for BGP state for a neighbor IP The status says up or down"; @@ -42,6 +48,8 @@ module sonic-events-bgp { } container bgp-hold-timer { + oc-alarm-types:MAJOR + description " Declares an event for BGP hold timer expiry. This event does not have any other parameter. @@ -51,6 +59,8 @@ module sonic-events-bgp { } container zebra-no-buff { + oc-alarm-types:MAJOR + description " Declares an event for zebra running out of buffer. This event does not have any other parameter. 
diff --git a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang index b9e67c1fceeb..a580afdf3a1a 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang @@ -2,6 +2,10 @@ module sonic-events-dhcp-relay { namespace "http://github.com/sonic-net/sonic-events-dhcp-relay"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-dhcp-relay { "SONIC dhcp-relay events"; container dhcp-relay-discard { + oc-alarm-types:MAJOR + description " Declares an event for dhcp-relay discarding packet on an interface due to missing IP address assigned. @@ -44,6 +50,8 @@ module sonic-events-dhcp-relay { } container dhcp-relay-disparity { + oc-alarm-types:MAJOR + description " Declares an event for disparity detected in DHCP Relay behavior by dhcpmon. diff --git a/src/sonic-yang-models/yang-events/sonic-events-host.yang b/src/sonic-yang-models/yang-events/sonic-events-host.yang index d9c20b9ae358..95da1106ee5c 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-host.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-host.yang @@ -2,6 +2,10 @@ module events-host { namespace "http://github.com/sonic-net/sonic-events-host"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -10,6 +14,8 @@ module events-host { } container disk-usage { + oc-alarm-types:MINOR + description " Declares an event for disk usage crossing set limit The parameters describe the usage & limit set."; @@ -26,6 +32,8 @@ module events-host { } container memory-usage { + oc-alarm-types:MINOR + description " Declares an event for memory usage crossing set limit The parameters describe the usage & limit set."; @@ -36,6 +44,8 @@ module events-host { } container event-sshd { + oc-alarm-types:MINOR + description " Declares an event reported by sshd. The fail type declares the type of failure. @@ -53,6 +63,8 @@ module events-host { } container event-disk { + oc-alarm-types:MINOR + description " Declares an event reported by disk check. The fail type declares the type of failure. @@ -69,6 +81,8 @@ module events-host { } container event-kernel { + oc-alarm-types:MINOR + description " Declares an event reported by kernel. The fail type declares the type of failure."; @@ -95,6 +109,8 @@ module events-host { } container event-monit-proc { + evtcmn:severity "2" + description " Declares an event reported by monit for a process that is not running. @@ -119,6 +135,8 @@ module events-host { } container event-monit-status { + evtcmn:severity "2" + description " Declares an event reported by monit for status check failure for a process @@ -149,6 +167,8 @@ module events-host { } container event-platform { + evtcmn:severity "2" + description " Declares an event for platform related failure. 
Params: diff --git a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang index 865ed2a65f8a..6439eacaafc3 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang @@ -2,6 +2,10 @@ module sonic-events-pmon { namespace "http://github.com/sonic-net/sonic-events-pmon"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -21,6 +25,8 @@ module sonic-events-pmon { container pmon-exited { + oc-alarm-types:MAJOR + description " Declares an event reportes by pmon for an unexpected exit. The exited entity is the only param"; diff --git a/src/sonic-yang-models/yang-events/sonic-events-swss.yang b/src/sonic-yang-models/yang-events/sonic-events-swss.yang index 1afa6947188d..2b5bbca3031f 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-swss.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-swss.yang @@ -2,6 +2,10 @@ module sonic-events-swss { namespace "http://github.com/sonic-net/sonic-events-swss"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-swss { "SONIC SWSS events"; container redis-generic { + oc-alarm-types:MAJOR + description " Declares an event for a fatal error encountered by swss. The asic-index of the failing process is the only param."; @@ -34,6 +40,8 @@ module sonic-events-swss { } container if-state { + oc-alarm-types:MINOR + description " Declares an event for i/f flap. @@ -57,6 +65,8 @@ module sonic-events-swss { } container pfc-storm { + oc-alarm-types:MAJOR + description " Declares an event for PFC storm. @@ -86,6 +96,8 @@ module sonic-events-swss { } container chk_crm_threshold { + oc-alarm-types:MAJOR + description " Declares an event for CRM threshold."; diff --git a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang index bf7e332c62f5..8a8a62579c99 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang @@ -2,6 +2,10 @@ module sonic-events-syncd { namespace "http://github.com/sonic-net/sonic-events-syncd"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-syncd { "SONIC syncd events"; container syncd_failure { + oc-alarm-types:MAJOR + description " Declares an event for all types of syncd failure. 
The type of failure and the asic-index of failing syncd are From e1b6f477f80bd9a20aa42bef47b5c1e47e84ebb6 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 26 May 2022 21:22:11 +0000 Subject: [PATCH 07/66] added severity --- .../yang-events/sonic-events-bgp.yang | 10 ++++++++++ .../yang-events/sonic-events-dhcp_relay.yang | 8 ++++++++ .../yang-events/sonic-events-host.yang | 20 +++++++++++++++++++ .../yang-events/sonic-events-pmon.yang | 6 ++++++ .../yang-events/sonic-events-swss.yang | 12 +++++++++++ .../yang-events/sonic-events-syncd.yang | 6 ++++++ 6 files changed, 62 insertions(+) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 1512840fd3c6..5b0508e366f2 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -7,6 +7,10 @@ module sonic-events-bgp { prefix evtcmn; } + import openconfig-alarm-types { + prefix oc-alarm-types; + } + revision 2022-12-01 { description "BGP alert events."; } @@ -21,6 +25,8 @@ module sonic-events-bgp { "SONIC BGP events"; container bgp-state { + oc-alarm-types:MAJOR + description " Declares an event for BGP state for a neighbor IP The status says up or down"; @@ -42,6 +48,8 @@ module sonic-events-bgp { } container bgp-hold-timer { + oc-alarm-types:MAJOR + description " Declares an event for BGP hold timer expiry. This event does not have any other parameter. @@ -51,6 +59,8 @@ module sonic-events-bgp { } container zebra-no-buff { + oc-alarm-types:MAJOR + description " Declares an event for zebra running out of buffer. This event does not have any other parameter. diff --git a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang index b9e67c1fceeb..a580afdf3a1a 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang @@ -2,6 +2,10 @@ module sonic-events-dhcp-relay { namespace "http://github.com/sonic-net/sonic-events-dhcp-relay"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-dhcp-relay { "SONIC dhcp-relay events"; container dhcp-relay-discard { + oc-alarm-types:MAJOR + description " Declares an event for dhcp-relay discarding packet on an interface due to missing IP address assigned. @@ -44,6 +50,8 @@ module sonic-events-dhcp-relay { } container dhcp-relay-disparity { + oc-alarm-types:MAJOR + description " Declares an event for disparity detected in DHCP Relay behavior by dhcpmon. 
diff --git a/src/sonic-yang-models/yang-events/sonic-events-host.yang b/src/sonic-yang-models/yang-events/sonic-events-host.yang index d9c20b9ae358..95da1106ee5c 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-host.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-host.yang @@ -2,6 +2,10 @@ module events-host { namespace "http://github.com/sonic-net/sonic-events-host"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -10,6 +14,8 @@ module events-host { } container disk-usage { + oc-alarm-types:MINOR + description " Declares an event for disk usage crossing set limit The parameters describe the usage & limit set."; @@ -26,6 +32,8 @@ module events-host { } container memory-usage { + oc-alarm-types:MINOR + description " Declares an event for memory usage crossing set limit The parameters describe the usage & limit set."; @@ -36,6 +44,8 @@ module events-host { } container event-sshd { + oc-alarm-types:MINOR + description " Declares an event reported by sshd. The fail type declares the type of failure. @@ -53,6 +63,8 @@ module events-host { } container event-disk { + oc-alarm-types:MINOR + description " Declares an event reported by disk check. The fail type declares the type of failure. @@ -69,6 +81,8 @@ module events-host { } container event-kernel { + oc-alarm-types:MINOR + description " Declares an event reported by kernel. The fail type declares the type of failure."; @@ -95,6 +109,8 @@ module events-host { } container event-monit-proc { + evtcmn:severity "2" + description " Declares an event reported by monit for a process that is not running. @@ -119,6 +135,8 @@ module events-host { } container event-monit-status { + evtcmn:severity "2" + description " Declares an event reported by monit for status check failure for a process @@ -149,6 +167,8 @@ module events-host { } container event-platform { + evtcmn:severity "2" + description " Declares an event for platform related failure. Params: diff --git a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang index 865ed2a65f8a..6439eacaafc3 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang @@ -2,6 +2,10 @@ module sonic-events-pmon { namespace "http://github.com/sonic-net/sonic-events-pmon"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -21,6 +25,8 @@ module sonic-events-pmon { container pmon-exited { + oc-alarm-types:MAJOR + description " Declares an event reportes by pmon for an unexpected exit. The exited entity is the only param"; diff --git a/src/sonic-yang-models/yang-events/sonic-events-swss.yang b/src/sonic-yang-models/yang-events/sonic-events-swss.yang index 1afa6947188d..2b5bbca3031f 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-swss.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-swss.yang @@ -2,6 +2,10 @@ module sonic-events-swss { namespace "http://github.com/sonic-net/sonic-events-swss"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-swss { "SONIC SWSS events"; container redis-generic { + oc-alarm-types:MAJOR + description " Declares an event for a fatal error encountered by swss. 
The asic-index of the failing process is the only param."; @@ -34,6 +40,8 @@ module sonic-events-swss { } container if-state { + oc-alarm-types:MINOR + description " Declares an event for i/f flap. @@ -57,6 +65,8 @@ module sonic-events-swss { } container pfc-storm { + oc-alarm-types:MAJOR + description " Declares an event for PFC storm. @@ -86,6 +96,8 @@ module sonic-events-swss { } container chk_crm_threshold { + oc-alarm-types:MAJOR + description " Declares an event for CRM threshold."; diff --git a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang index bf7e332c62f5..8a8a62579c99 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang @@ -2,6 +2,10 @@ module sonic-events-syncd { namespace "http://github.com/sonic-net/sonic-events-syncd"; yang-version 1.1; + import openconfig-alarm-types { + prefix oc-alarm-types; + } + import sonic-events-common { prefix evtcmn; } @@ -20,6 +24,8 @@ module sonic-events-syncd { "SONIC syncd events"; container syncd_failure { + oc-alarm-types:MAJOR + description " Declares an event for all types of syncd failure. The type of failure and the asic-index of failing syncd are From bb842adfe65689a543919f86532503ef1e0af586 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 26 May 2022 21:30:54 +0000 Subject: [PATCH 08/66] Sev update --- src/sonic-yang-models/yang-events/sonic-events-bgp.yang | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 5b0508e366f2..1406a8b24047 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -25,7 +25,7 @@ module sonic-events-bgp { "SONIC BGP events"; container bgp-state { - oc-alarm-types:MAJOR + oc-alarm-types:MINOR description " Declares an event for BGP state for a neighbor IP From acffaa16c68c251a220ca0840837282c0342bc13 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 2 Jun 2022 18:22:51 +0000 Subject: [PATCH 09/66] First cut --- src/sonic-eventd/eventd/eventd.cpp | 439 ++++++++++++++++++----------- src/sonic-eventd/eventd/eventd.h | 198 ++++++++----- 2 files changed, 390 insertions(+), 247 deletions(-) diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp index e79750736a5f..fde4fd299d35 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -21,87 +21,95 @@ /* Count of elements returned in each read */ #define READ_SET_SIZE 100 -/* - * Max count of possible concurrent event publishers - * A rough estimate only, more for mem reserve. 
- * So this does not limit any usage - */ -#define MAX_PUBLISHERS_COUNT 1000 - - -eventd_server::eventd_server() : m_capture(NULL) -{ - m_ctx = zmq_ctx_new(); - RET_ON_ERR(m_ctx != NULL, "Failed to get zmq ctx"); - m_cache_max = get_config_data(CACHE_MAX_CNT, (uint32_t)MAX_CACHE_SIZE); - -out: - return; -} - - -eventd_server::~eventd_server() -{ - close(); -} - - int -eventd_server::zproxy_service() +eventd_proxy::init() { int ret = -1; SWSS_LOG_INFO("Start xpub/xsub proxy"); - void *frontend = zmq_socket(m_ctx, ZMQ_XSUB); - RET_ON_ERR(frontend != NULL, "failing to get ZMQ_XSUB socket"); + m_frontend = zmq_socket(m_ctx, ZMQ_XSUB); + RET_ON_ERR(m_frontend != NULL, "failing to get ZMQ_XSUB socket"); - int rc = zmq_bind(frontend, get_config(XSUB_END_KEY)); + int rc = zmq_bind(m_frontend, get_config(XSUB_END_KEY)); RET_ON_ERR(rc == 0, "Failing to bind XSUB to %s", get_config(XSUB_END_KEY)); - void *backend = zmq_socket(m_ctx, ZMQ_XPUB); - RET_ON_ERR(backend != NULL, "failing to get ZMQ_XPUB socket"); + m_backend = zmq_socket(m_ctx, ZMQ_XPUB); + RET_ON_ERR(m_backend != NULL, "failing to get ZMQ_XPUB socket"); - rc = zmq_bind(backend, get_config(XPUB_END_KEY)); + rc = zmq_bind(m_backend, get_config(XPUB_END_KEY)); RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(XPUB_END_KEY)); - void *capture = zmq_socket(m_ctx, ZMQ_PUB); - RET_ON_ERR(capture != NULL, "failing to get ZMQ_PUB socket for capture"); + m_capture = zmq_socket(m_ctx, ZMQ_PUB); + RET_ON_ERR(m_capture != NULL, "failing to get ZMQ_PUB socket for capture"); - rc = zmq_bind(capture, get_config(CAPTURE_END_KEY)); + rc = zmq_bind(m_capture, get_config(CAPTURE_END_KEY)); RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config(CAPTURE_END_KEY)); - m_thread_proxy = thread(&eventd_server::zproxy_service_run, this, frontend, - backend, capture); + m_thr = thread(&eventd_proxy::run, this); ret = 0; out: return ret; } - void -eventd_server::zproxy_service_run(void *frontend, void *backend, void *capture) +eventd_proxy::run() { SWSS_LOG_INFO("Running xpub/xsub proxy"); /* runs forever until zmq context is terminated */ - zmq_proxy(frontend, backend, capture); + zmq_proxy(m_frontend, m_backend, m_capture); - zmq_close(frontend); - zmq_close(backend); - zmq_close(capture); + return 0; +} - SWSS_LOG_ERR("Terminating xpub/xsub proxy"); - return 0; +capture_service::~capture_service() +{ + stop_capture(); } +capture_service::stop_capture() +{ + if (m_socket != NULL) { + zmq_close(m_socket); + m_socket = NULL; + } + if (m_thr.joinable()) { + m_thr.join(); + } +} -int -eventd_server::capture_events(events_data_lst_t &lst) +static bool +validate_event(const internal_event_t &event, string &rid, string &seq) +{ + bool ret = false; + + internal_event_t::const_iterator itc_r, itc_s, itc_e; + itc_r = event.find(EVENT_RUNTIME_ID); + itc_s = event.find(EVENT_SEQUENCE); + itc_e = event.find(EVENT_STR_DATA); + + if ((itc_r != event.end()) && (itc_s != event.end()) && (itc_e != event.end())) { + invalid_evt = true; + + rid = itc_r->second; + seq = itc_s->second; + } + else { + SWSS_LOG_ERROR("Invalid evt: %s", map_to_str(event).str()); + } + + return ret; +} + + +void +capture_service::init_capture_cache(events_data_lst_t &lst) { /* clean any pre-existing cache */ int ret = -1; int i; + string rid, seq; /* * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only @@ -114,73 +122,80 @@ eventd_server::capture_events(events_data_lst_t &lst) } /* Cache last - typedef map pre_exist_id_t; - pre_exist_id_t pre_exist_id; - - 
RET_ON_ERR(m_capture != NULL, "capture sock is not initialized yet"); - - if (!lst.empty()) { - for (events_data_lst_t::it = lst.begin(); it != lst.end(); ++it) { - internal_event_t event; + for (events_data_lst_t::it = lst.begin(); it != lst.end(); ++it) { + internal_event_t event; - if (deserialize(*itc, event) == 0) { - pre_exist_id[event[EVENT_RUNTIME_ID]] = events_base::str_to_seq(event[EVENT_SEQUENCE]); - } - else { - SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); + if (deserialize(*itc, event) == 0) { + if (validate_event(event, rid, seq)) { + m_pre_exist_id[event[EVENT_RUNTIME_ID]] = rid; + m_events.push_back(*itc); } } - m_events.swap(lst); + else { + SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); + } } +} - if (!pre_exist_id.empty()) { - /* Check read events against provided cache for 2 seconds to skip */ - chrono::steady_clock::timepoint start = chrono::steady_clock::now(); - while(!pre_exist_id.empty()) { - internal_event_t event; - string source, evt_str; - - RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, - "Failed to read from capture socket"); - - if (deserialize(evt_str, event) == 0) { - pre_exist_id_t::iterator it = pre_exist_id.find(event[EVENT_RUNTIME_ID]); - if (it != pre_exist_id.end()) { - seq = events_base::str_to_seq(event[EVENT_SEQUENCE]); - if (seq > it->second) { - m_events.push_back(evt_str); - } - if (seq >= it->second) { - pre_exist_id.erase(it); - } + +void +capture_service::do_capture() +{ + /* clean any pre-existing cache */ + int ret = -1; + int i; + string rid, seq; + + /* Check read events against provided cache for 2 seconds to skip */ + chrono::steady_clock::timepoint start = chrono::steady_clock::now(); + while(!m_pre_exist_id.empty()) { + internal_event_t event; + string source, evt_str; + + RET_ON_ERR(zmq_message_read(m_socket, 0, source, event) == 0, + "Failed to read from capture socket"); + + if (validate_event(event, rid, seq)) { + + serialize(event, evt_str); + m_pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); + + if (it != m_pre_exist_id.end()) { + seq = events_base::str_to_seq(seq); + if (seq > it->second) { + m_events.push_back(evt_str); + } + if (seq >= it->second) { + m_pre_exist_id.erase(it); } } - else { - SWSS_LOG_ERROR("failed to serialize received event from publisher. 
DROP"); - } - if(chrono::steady_clock::now() - start > chrono::seconds(2)) - break; } - pre_exist_id_t().swap(pre_exist_id); + if(chrono::steady_clock::now() - start > chrono::seconds(2)) + break; } + pre_exist_id_t().swap(m_pre_exist_id); /* Save until max allowed */ while(m_events.size() < m_cache_max) { + internal_event_t event; string source, evt_str; - RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, + RET_ON_ERR(zmq_message_read(m_socket, 0, source, event) == 0, "Failed to read from capture socket"); - try - { - m_events.push_back(evt_str); - } - catch (exception& e) - { - stringstream ss; - ss << e.what(); - SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", - ss.str().c_str(), m_events.size()); - break; + if (validate_event(event, rid, seq)) { + serialize(event, evt_str); + try + { + m_events.push_back(evt_str); + } + catch (exception& e) + { + stringstream ss; + ss << e.what(); + SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", + ss.str().c_str(), m_events.size()); + break; + } } } @@ -190,14 +205,12 @@ eventd_server::capture_events(events_data_lst_t &lst) internal_event_t event; string source, evt_str; - RET_ON_ERR(zmq_message_read(m_socket, 0, source, evt_str) == 0, + RET_ON_ERR(zmq_message_read(m_socket, 0, source, event) == 0, "Failed to read from capture socket"); - if (deserialize(evt_str, event) == 0) { - m_last_events[event[EVENT_RUNTIME_ID]] = evt_str; - } - else { - SWSS_LOG_ERROR("FAILED to serialize received event from publisher. DROP"); + if (validate_event(event, rid, seq)) { + serialize(event, evt_str); + m_last_events[rid] = evt_str; } } out: @@ -208,15 +221,109 @@ eventd_server::capture_events(events_data_lst_t &lst) return 0; } +int +capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) +{ + int ret = -1; + + /* Can go in single step only. */ + RET_ON_ERR((ctrl - m_ctrl) == 1, "m_ctrl(%d) > ctrl(%d)", m_ctrl, ctrl); + m_ctrl = ctrl; + + switch(m_ctrl) { + case INIT_CAPTURE: + { + void *sock = NULL; + sock = zmq_socket(m_ctx, ZMQ_SUB); + RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); + + rc = zmq_connect(sock, get_config(CAPTURE_END_KEY)); + RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(CAPTURE_END_KEY)); + + rc = zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0); + RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); + + m_socket = sock; + } + ret = 0; + break; + + case START_CAPTURE: + + /* + * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only + * upon m_events/vector overflow, which might block adding new entries in map + * if overall mem consumption is too high. Clearing the map just before use + * is likely to help. + */ + for (i=0; iempty())) { + init_capture_cache(*lst); + } + + m_thr = thread(&capture_service::do_capture, this); + RET_ON_ERR(m_thr.joinable(), "Capture thread not running"); + ret = 0; + break; + + + case STOP_CAPTURE: + /* + * Caller would have initiated SUBS channel. + * Read for CACHE_DRAIN_IN_MILLISECS to drain off cache + * before stopping. 
+ */ + this_thread::sleep_for(chrono::milliseconds(CACHE_DRAIN_IN_MILLISECS)); + stop_capture(); + break; + + default: + SWSS_LOG_ERROR("Unexpected code=%d", m_ctrl); + break; + } +out: + return ret; +} int -eventd_server::eventd_service() +capture_service::read_cache(events_data_lst_t &lst_fifo, + last_events_t &lst_last) +{ + lst_fifo.swap(m_events); + lst_last.swap(m_last_events); + last_events_t().swap(m_last_events); + events_data_lst_t().swap(m_events); + return 0; +} + + +void +run_eventd_service() { event_service service; + eventd_proxy *proxy = NULL; + capture_service *capture = NULL; - RET_ON_ERR(zproxy_service() == 0, "Failed to start zproxy_service"); + events_data_lst_t capture_fifo_events; + last_events_t capture_last_events; - RET_ON_ERR(service.init_server(m_ctx) == 0, "Failed to init service"); + SWSS_LOG_ERROR("Eventd service starting\n"); + + void *zctx = zmq_ctx_new(); + RET_ON_ERR(ctx != NULL, "Failed to get zmq ctx"); + + cache_max = get_config_data(CACHE_MAX_CNT, (uint32_t)MAX_CACHE_SIZE); + RET_ON_ERR(cache_max > 0, "Failed to get CACHE_MAX_CNT"); + + proxy = new eventd_proxy(zctx); + RET_ON_ERR(proxy != NULL, "Failed to create proxy"); + + RET_ON_ERR(proxy->init() == 0, "Failed to init proxy"); + + RET_ON_ERR(service.init_server(zctx) == 0, "Failed to init service"); while(true) { int code, resp = -1; @@ -228,87 +335,79 @@ eventd_server::eventd_service() switch(code) { case EVENT_CACHE_INIT: /* connect only*/ - if (m_capture != NULL) { - resp_code = 1; - break; + if (capture != NULL) { + delete capture; } - m_capture = zmq_socket(m_ctx, ZMQ_SUB); - RET_ON_ERR(capture != NULL, "failing to get ZMQ_SUB socket"); - - rc = zmq_connect(capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(CAPTURE_END_KEY)); + events_data_lst_t().swap(capture_fifo_events); + last_events_t().swap(capture_last_events); - rc = zmq_setsockopt(sub_read, ZMQ_SUBSCRIBE, "", 0); - RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); - - resp_code = 0; + capture = new capture_service(zctx, cache_max); + if (capture != NULL) { + resp = capture->set_control(INIT_CAPTURE); + } break; case EVENT_CACHE_START: - if (m_capture == NULL) { - resp_code = -1; + if (capture == NULL) { + SWSS_LOG_ERROR("Cache is not initialized to start"); + resp = -1; break; } - events_data_lst_t.swap(m_events); - last_events_t.swap(m_last_events); - - /* Kick off the service */ - m_thread_capture = thread(&eventd_server::capture_events, this, req_data); - - resp_code = 0; + resp = capture->set_control(START_CAPTURE, req_data); break; case EVENT_CACHE_STOP: - if (m_capture != NULL) { - /* - * Caller would have initiated SUBS channel. - * Read for CACHE_DRAIN_IN_MILLISECS to drain off cache - * before stopping. 
- */ - this_thread::sleep_for(chrono::milliseconds(CACHE_DRAIN_IN_MILLISECS)); - close(m_capture); - m_capture = NULL; - - /* Wait for thread to end */ - m_thread_capture.join(); - resp_code = 0; + if (capture == NULL) { + SWSS_LOG_ERROR("Cache is not initialized to stop"); + resp = -1; + break; } - else { - resp_code = -1; + resp = capture->set_control(STOP_CAPTURE); + if (resp == 0) { + resp = capture->read_cache(capture_fifo_events, capture_last_events); } + delete capture; + capture = NULL; break; case EVENT_CACHE_READ: - resp_code = 0; + if (capture != NULL) { + SWSS_LOG_ERROR("Cache is not stopped yet."); + resp = -1; + break; + } + resp = 0; - if (m_events.empty()) { - for (last_events_t::iterator it = m_last_events.begin(); - it != m_last_events.end(); ++it) { - m_events.push_back(it->second); + if (capture_fifo_events.empty()) { + for (last_events_t::iterator it = capture_last_events.begin(); + it != capture_last_events.end(); ++it) { + capture_fifo_events.push_back(it->second); } - last_events_t().swap(m_last_events); + last_events_t().swap(capture_last_events); } - int sz = m_events.size() < READ_SET_SIZE ? m_events.size() : READ_SET_SIZE; + int sz = capture_fifo_events.size() < READ_SET_SIZE ? + capture_fifo_events.size() : READ_SET_SIZE; if (sz != 0) { - auto it = std::next(m_events.begin(), sz); - move(m_events.begin(), m_events.end(), back_inserter(resp_data)); + auto it = std::next(capture_fifo_events.begin(), sz); + move(capture_fifo_events.begin(), capture_fifo_events.end(), + back_inserter(resp_data)); - if (sz == m_events.size()) { - events_data_lst_t().swap(m_events); + if (sz == capture_fifo_events.size()) { + events_data_lst_t().swap(capture_fifo_events); } else { - m_events.erase(m_events.begin(), it); + events.erase(capture_fifo_events.begin(), it); } } break; case EVENT_ECHO: - resp_code = 0; + resp = 0; resp_data.swap(req_data); default: @@ -320,22 +419,18 @@ eventd_server::eventd_service() "Failed to write response back"); } out: - /* Breaks here on fatal failure */ - if (m_capture != NULL) { - close(m_capture); - m_capture = NULL; + m_service.close(); + if (proxy != NULL) { + delete proxy; } - close(); - m_thread_proxy.join(); - m_thread_capture.join(); - return 0; -} - - - -void eventd_server::close() -{ - zmq_ctx_term(m_ctx); m_ctx = NULL; + if (capture != NULL) { + delete capture; + } + if (zctx != NULL) { + zmq_ctx_term(zctx); + } + SWSS_LOG_ERROR("Eventd service exiting\n"); + meturn 0; } diff --git a/src/sonic-eventd/eventd/eventd.h b/src/sonic-eventd/eventd/eventd.h index 855bb0dba78c..3b20ad344916 100644 --- a/src/sonic-eventd/eventd/eventd.h +++ b/src/sonic-eventd/eventd/eventd.h @@ -5,95 +5,143 @@ typedef map last_events_t; -class eventd_server { +/* + * Started by eventd_service. + * Creates XPUB & XSUB end points. + onicanalytics.azurecr.io Bind the same + * Create a PUB socket end point for capture and bind. + * Call run_proxy method with sockets in a dedicated thread. + * Thread runs forever until the zmq context is terminated. + */ +class eventd_proxy +{ public: - /* Creates the zmq context */ - eventd_server(); - - ~eventd_server(); - - /* - * Main eventd service loop that honors event_req_type_t - * - * For echo, it just echoes - * - * FOr cache start, create the SUB end of capture and kick off - * capture_events thread. Upon cache stop command, close the handle - * which will stop the caching thread with read failure. - * - * for cache read, returns the collected events as subset of - * strings. 
- * - */ - int eventd_service(); - - - /* - * For any fatal failure, terminate the entire run across threads - * by deleting the zmq context. - */ - void close(); + eventd_proxy(void *ctx) : m_ctx(ctx), m_frontend(NULL), m_backend(NULL), + m_capture(NULL) {}; + + ~eventd_proxy() { + zmq_close(m_frontend); + zmq_close(m_backend); + zmq_close(m_capture); + + if (m_thr.joinable) + m_thr.join(); + } + + int init(); private: - /* - * Started by eventd_service. - * Creates XPUB & XSUB end points. - * Bind the same - * Create a PUB socket end point for capture and bind. - * Call run_proxy method with sockets in a dedicated thread. - * Thread runs forever until the zmq context is terminated. - */ - int zproxy_service(); - int zproxy_service_run(void *front, void *back, void *capture); - - - /* - * Capture/Cache service - * - * The service started in a dedicted thread upon demand. - * It expects SUB end of capture created & connected to the PUB - * capture end in zproxy service. - * - * It goes in a forever loop, until the zmq read fails, which will happen - * if the capture/SUB end is closed. The stop cache service will close it, - * while start cache service creates & connects. - * - * Hence this thread/function is active between cache start & stop. - * - * Each event is 2 parts. It drops the first part, which is - * more for filtering events. It creates string from second part - * and saves it. - * - * The string is the serialized version of internal_event_ref - * - * It keeps two sets of data - * 1) List of all events received in vector in same order as received - * 2) Map of last event from each runtime id upon list overflow max size. - * - * We add to the vector as much as allowed by vector and max limit, - * whichever comes first. - * - * The sequence number in internal event will help assess the missed count - * by the consumer of the cache data. - * - * Thread is started upon creating SUB end of capture socket. - */ - int capture_events(events_data_lst_t &lst); + int run(); + + void *m_ctx; + void *m_frontend; + void *m_backend; + void *m_capture; + thread m_thr; +}; + + +/* + * Capture/Cache service + * + * The service started in a dedicted thread upon demand. + * It is controlled by the caller. + * On cache init, the thread is created. + * Upon create, it creates a SUB socket to PUB end point of capture. + * PUB end point is maintained by zproxy service. + * + * On Cache start, the thread is signalled to start reading. + * + * On cache stop, it is signalled to stop reading and exit. Caller waits + * for thread to exit, before starting to read cached data, to ensure + * that the data is not handled by two threads concurrently. + * + * This thread maintains its own copy of cache. Reader, does a swap + * after thread exits. + * This thread ensures the cache is empty at the init. + * + * Upon cache start, the thread is blocked in receive call with timeout. + * Only upon receive/timeout, it would notice stop signal. Hence stop + * is not synchronous. The caller may wait for thread to terminate + * via thread.join(). + * + * Each event is 2 parts. It drops the first part, which is + * more for filtering events. It creates string from second part + * and saves it. + * + * The string is the serialized version of internal_event_ref + * + * It keeps two sets of data + * 1) List of all events received in vector in same order as received + * 2) Map of last event from each runtime id upon list overflow max size. 
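+ *
+ *    A sketch of the intended behavior (sizes here are illustrative only):
+ *      received  : e1(rid-A, seq 1), e2(rid-B, seq 5), e3(rid-A, seq 2), e4(rid-A, seq 4)
+ *      vector    : e1, e2, e3           -- FIFO, capped at the cache max (3 in this sketch)
+ *      last map  : { rid-A: e4 }        -- newest event per runtime id once the vector is full
+ *    The jump from seq 2 to seq 4 for rid-A lets the consumer compute the missed count.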
+ * + * We add to the vector as much as allowed by vector and max limit, + * whichever comes first. + * + * The sequence number in internal event will help assess the missed count + * by the consumer of the cache data. + * + */ +typedef enum { + NEED_INIT = 0, + INIT_CAPTURE, + START_CAPTURE, + STOP_CAPTURE +} capture_control_t; + +int capture_status; + +class capture_service +{ + public: + capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_socket(NULL), + m_ctl(NEED_INIT), m_status(0), m_cache_max(cache_max) + {} + + ~capture_service(); + + int set_control(capture_control_t ctrl, events_data_lst_t *p=NULL); + + int read_cache(events_data_lst_t &lst_fifo, + last_events_t &lst_last); private: - uint32_t m_cache_max; + void init_capture_cache(events_data_lst_t &lst); + void do_capture(); + + void stop_capture(); void *m_ctx; + void *m_socket; + capture_control_t m_ctrl; + thread m_thr; + + uint32_t m_cache_max; events_data_lst_t m_events; last_events_t m_last_events; - void *m_capture; + typedef map pre_exist_id_t; + pre_exist_id_t m_pre_exist_id; - thread m_thread_proxy; - thread m_thread_capture; }; +/* + * Main server, that starts the zproxy service and honor + * eventd service requests event_req_type_t + * + * For echo, it just echoes + * + * FOr cache start, create the SUB end of capture and kick off + * capture_events thread. Upon cache stop command, close the handle + * which will stop the caching thread with read failure. + * + * for cache read, returns the collected events in chunks. + * + */ +void run_eventd_service(); + + From ba48440d2e180404ae21d78f3cc57bf6f521577f Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Sat, 4 Jun 2022 00:41:31 +0000 Subject: [PATCH 10/66] first cut --- dockers/docker-eventd/Dockerfile.j2 | 35 ++++++++++++++++ dockers/docker-eventd/critical_processes | 1 + dockers/docker-eventd/eventd.sh | 3 ++ dockers/docker-eventd/start.sh | 6 +++ dockers/docker-eventd/supervisord.conf | 52 ++++++++++++++++++++++++ rules/docker-eventd.dep | 11 +++++ rules/docker-eventd.mk | 38 +++++++++++++++++ rules/eventd.dep | 12 ++++++ rules/eventd.mk | 19 +++++++++ src/sonic-eventd/Makefile | 42 ------------------- src/sonic-eventd/Makefile.am | 2 + src/sonic-eventd/eventd/Makefile.am | 17 ++++++++ src/sonic-eventd/eventd/eventd.cpp | 13 +++++- 13 files changed, 208 insertions(+), 43 deletions(-) create mode 100644 dockers/docker-eventd/Dockerfile.j2 create mode 100644 dockers/docker-eventd/critical_processes create mode 100755 dockers/docker-eventd/eventd.sh create mode 100755 dockers/docker-eventd/start.sh create mode 100644 dockers/docker-eventd/supervisord.conf create mode 100644 rules/docker-eventd.dep create mode 100644 rules/docker-eventd.mk create mode 100644 rules/eventd.dep create mode 100644 rules/eventd.mk delete mode 100644 src/sonic-eventd/Makefile create mode 100644 src/sonic-eventd/Makefile.am create mode 100644 src/sonic-eventd/eventd/Makefile.am diff --git a/dockers/docker-eventd/Dockerfile.j2 b/dockers/docker-eventd/Dockerfile.j2 new file mode 100644 index 000000000000..0a8fad29c358 --- /dev/null +++ b/dockers/docker-eventd/Dockerfile.j2 @@ -0,0 +1,35 @@ +{% from "dockers/dockerfile-macros.j2" import install_debian_packages, install_python_wheels, copy_files %} +FROM docker-config-engine-bullseye-{{DOCKER_USERNAME}}:{{DOCKER_USERTAG}} + +ARG docker_container_name +ARG image_version +RUN [ -f /etc/rsyslog.conf ] && sed -ri "s/%syslogtag%/$docker_container_name#%syslogtag%/;" /etc/rsyslog.conf + +# Make apt-get non-interactive +ENV 
DEBIAN_FRONTEND=noninteractive + +# Pass the image_version to container +ENV IMAGE_VERSION=$image_version + +# Update apt's cache of available packages +RUN apt-get update + +{% if docker_eventd_debs.strip() -%} +# Copy built Debian packages +{{ copy_files("debs/", docker_eventd_debs.split(' '), "/debs/") }} + +# Install built Debian packages and implicitly install their dependencies +{{ install_debian_packages(docker_eventd_debs.split(' ')) }} +{%- endif %} + +# Clean up +RUN apt-get clean -y && \ + apt-get autoclean -y && \ + apt-get autoremove -y && \ + rm -rf /debs + +COPY ["supervisord.conf", "/etc/supervisor/conf.d/"] +COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] +COPY ["critical_processes", "/etc/supervisor"] + +ENTRYPOINT ["/usr/local/bin/supervisord"] diff --git a/dockers/docker-eventd/critical_processes b/dockers/docker-eventd/critical_processes new file mode 100644 index 000000000000..8ff28edbc148 --- /dev/null +++ b/dockers/docker-eventd/critical_processes @@ -0,0 +1 @@ +program:eventd diff --git a/dockers/docker-eventd/eventd.sh b/dockers/docker-eventd/eventd.sh new file mode 100755 index 000000000000..a26e7ffd6ef8 --- /dev/null +++ b/dockers/docker-eventd/eventd.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +exec /usr/bin/eventd diff --git a/dockers/docker-eventd/start.sh b/dockers/docker-eventd/start.sh new file mode 100755 index 000000000000..60cd6a00aecb --- /dev/null +++ b/dockers/docker-eventd/start.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +if [ "${RUNTIME_OWNER}" == "" ]; then + RUNTIME_OWNER="kube" +fi + diff --git a/dockers/docker-eventd/supervisord.conf b/dockers/docker-eventd/supervisord.conf new file mode 100644 index 000000000000..be51f922c120 --- /dev/null +++ b/dockers/docker-eventd/supervisord.conf @@ -0,0 +1,52 @@ +[supervisord] +logfile_maxbytes=1MB +logfile_backups=2 +nodaemon=true + +[eventlistener:dependent-startup] +command=python3 -m supervisord_dependent_startup +autostart=true +autorestart=unexpected +startretries=0 +exitcodes=0,3 +events=PROCESS_STATE +buffer_size=1024 + +[eventlistener:supervisor-proc-exit-listener] +command=/usr/bin/supervisor-proc-exit-listener --container-name eventd +events=PROCESS_STATE_EXITED,PROCESS_STATE_RUNNING +autostart=true +autorestart=unexpected +buffer_size=1024 + +[program:rsyslogd] +command=/usr/sbin/rsyslogd -n -iNONE +priority=1 +autostart=false +autorestart=unexpected +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true + +[program:start] +command=/usr/bin/start.sh +priority=2 +autostart=false +autorestart=false +startsecs=0 +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true +dependent_startup_wait_for=rsyslogd:running + + +[program:eventd] +command=/usr/bin/eventd +priority=3 +autostart=false +autorestart=false +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true +dependent_startup_wait_for=start:exited + diff --git a/rules/docker-eventd.dep b/rules/docker-eventd.dep new file mode 100644 index 000000000000..382513e5eb82 --- /dev/null +++ b/rules/docker-eventd.dep @@ -0,0 +1,11 @@ + +DPATH := $($(DOCKER_EVENTD)_PATH) +DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/docker-eventd.mk rules/docker-eventd.dep +DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) +DEP_FILES += $(shell git ls-files $(DPATH)) + +$(DOCKER_EVENTD)_CACHE_MODE := GIT_CONTENT_SHA +$(DOCKER_EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) +$(DOCKER_EVENTD)_DEP_FILES := $(DEP_FILES) + +$(eval $(call add_dbg_docker,$(DOCKER_EVENTD),$(DOCKER_EVENTD_DBG))) diff --git a/rules/docker-eventd.mk 
b/rules/docker-eventd.mk new file mode 100644 index 000000000000..f21565f78f5d --- /dev/null +++ b/rules/docker-eventd.mk @@ -0,0 +1,38 @@ +# docker image for eventd + +DOCKER_EVENTD_STEM = docker-eventd +DOCKER_EVENTD = $(DOCKER_EVENTD_STEM).gz +DOCKER_EVENTD_DBG = $(DOCKER_EVENTD_STEM)-$(DBG_IMAGE_MARK).gz + +$(DOCKER_EVENTD)_DEPENDS += $(EVENTD) + +$(DOCKER_EVENTD)_DBG_DEPENDS = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS) +$(DOCKER_EVENTD)_DBG_DEPENDS += $(EVENTD_DBG) $(LIBSWSSCOMMON_DBG) + +$(DOCKER_EVENTD)_DBG_IMAGE_PACKAGES = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_IMAGE_PACKAGES) + +$(DOCKER_EVENTD)_LOAD_DOCKERS = $(DOCKER_CONFIG_ENGINE_BULLSEYE) + +$(DOCKER_EVENTD)_PATH = $(DOCKERS_PATH)/$(DOCKER_EVENTD_STEM) + +$(DOCKER_EVENTD)_INSTALL_PYTHON_WHEELS = $(SONIC_UTILITIES_PY3) +$(DOCKER_EVENTD)_INSTALL_DEBS = $(PYTHON3_SWSSCOMMON) + +$(DOCKER_EVENTD)_VERSION = 1.0.0 +$(DOCKER_EVENTD)_PACKAGE_NAME = eventd + +$(DOCKER_DHCP)_SERVICE_REQUIRES = updategraph +$(DOCKER_DHCP)_SERVICE_AFTER = database + +SONIC_DOCKER_IMAGES += $(DOCKER_EVENTD) +SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_EVENTD) + +SONIC_DOCKER_DBG_IMAGES += $(DOCKER_EVENTD_DBG) +SONIC_INSTALL_DOCKER_DBG_IMAGES += $(DOCKER_EVENTD_DBG) + +$(DOCKER_EVENTD)_CONTAINER_NAME = eventd +$(DOCKER_EVENTD)_RUN_OPT += --privileged -t +$(DOCKER_EVENTD)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro + +SONIC_BULLSEYE_DOCKERS += $(DOCKER_EVENTD) +SONIC_BULLSEYE_DBG_DOCKERS += $(DOCKER_EVENTD_DBG) diff --git a/rules/eventd.dep b/rules/eventd.dep new file mode 100644 index 000000000000..eec3563d8917 --- /dev/null +++ b/rules/eventd.dep @@ -0,0 +1,12 @@ + +SPATH := $($(EVENTD)_SRC_PATH) +DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/eventd.mk rules/eventd.dep +DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) +SMDEP_FILES := $(addprefix $(SPATH)/,$(shell cd $(SPATH) && git ls-files)) + +$(EVENTD)_CACHE_MODE := GIT_CONTENT_SHA +$(EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) +$(EVENTD)_DEP_FILES := $(DEP_FILES) +$(EVENTD)_SMDEP_FILES := $(SMDEP_FILES) +$(EVENTD)_SMDEP_PATHS := $(SPATH) + diff --git a/rules/eventd.mk b/rules/eventd.mk new file mode 100644 index 000000000000..367f34e26d3a --- /dev/null +++ b/rules/eventd.mk @@ -0,0 +1,19 @@ +# eventd package + +EVENTD = eventd.0.0_$(CONFIGURED_ARCH).deb +$(EVENTD)_SRC_PATH = $(SRC_PATH)/sonic-eventd +$(EVENTD)_DEPENDS += $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) + +$(EVENTD)_RDEPENDS += $(LIBSWSSCOMMON) +SONIC_DPKG_DEBS += $(EVENTD) + +EVENTD_DBG = eventd-dbg_1.0.0_$(CONFIGURED_ARCH).deb +$(EVENTD_DBG)_DEPENDS += $(EVENTD) +$(EVENTD_DBG)_RDEPENDS += $(EVENTD) +$(eval $(call add_derived_package,$(EVENTD),$(EVENTD_DBG))) + +# The .c, .cpp, .h & .hpp files under src/{$DBG_SRC_ARCHIVE list} +# are archived into debug one image to facilitate debugging. 
+# +DBG_SRC_ARCHIVE += sonic-eventd + diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile deleted file mode 100644 index 07c0ef41dca0..000000000000 --- a/src/sonic-eventd/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -RM := rm -rf -EVENTD_TARGET := eventd -CP := cp -MKDIR := mkdir -CC := g++ -MV := mv -LIBS := -levent -lswsscommon -pthread -lzmq -CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common -PWD := $(shell pwd) - -ifneq ($(MAKECMDGOALS),clean) -ifneq ($(strip $(C_DEPS)),) --include $(C_DEPS) $(OBJS) -endif -endif - --include src/subdir.mk - -all: sonic-eventd - -sonic-eventd: $(OBJS) - @echo 'Building target: $@' - @echo 'Invoking: G++ Linker' - $(CC) $(LDFLAGS) -o $(EVENTD_TARGET) $(OBJS) $(LIBS) - @echo 'Finished building target: $@' - @echo ' ' - -install: - $(MKDIR) -p $(DESTDIR)/usr/sbin - $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin - -deinstall: - $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) - $(RM) -rf $(DESTDIR)/usr/sbin - -clean: - -$(RM) $(EXECUTABLES) $(C_DEPS) $(OBJS) $(EVENTD_TARGET) - -@echo ' ' - -.PHONY: all clean dependents - - diff --git a/src/sonic-eventd/Makefile.am b/src/sonic-eventd/Makefile.am new file mode 100644 index 000000000000..095f0f81f07e --- /dev/null +++ b/src/sonic-eventd/Makefile.am @@ -0,0 +1,2 @@ +SUBDIRS = eventd + diff --git a/src/sonic-eventd/eventd/Makefile.am b/src/sonic-eventd/eventd/Makefile.am new file mode 100644 index 000000000000..d239648b216b --- /dev/null +++ b/src/sonic-eventd/eventd/Makefile.am @@ -0,0 +1,17 @@ +INCLUDES = -I $(top_srcdir)/lib \ + -I $(top_srcdir) + +bin_PROGRAMS = eventd + +if DEBUG +DBGFLAGS = -ggdb -DDEBUG +else +DBGFLAGS = -g -DNDEBUG +endif + +eventd_sources = eventd.cpp + +eventd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) +eventd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) +eventd_LDADD = -lpthread -lswsscommon -lzmq -lboost_serialization -luuid + diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/eventd/eventd.cpp index fde4fd299d35..c798f4f5bf4b 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ b/src/sonic-eventd/eventd/eventd.cpp @@ -430,7 +430,18 @@ run_eventd_service() zmq_ctx_term(zctx); } SWSS_LOG_ERROR("Eventd service exiting\n"); +} + + +int main() +{ + SWSS_LOG_INFO("The eventd service started"); - meturn 0; + run_eventd_service(); + + SWSS_LOG_INFO("The eventd service exited"); + + return 0; } + From 99fa2c33e1eed8685e64195b2870b913610557a3 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 6 Jun 2022 17:48:51 +0000 Subject: [PATCH 11/66] Trying to build --- rules/docker-eventd.mk | 4 ++-- rules/eventd.dep | 12 +++++------- rules/eventd.mk | 18 +++++++++--------- src/sonic-eventd/debian/changelog | 5 +++++ src/sonic-eventd/debian/compat | 1 + src/sonic-eventd/debian/control | 14 ++++++++++++++ src/sonic-eventd/debian/rules | 6 ++++++ 7 files changed, 42 insertions(+), 18 deletions(-) create mode 100644 src/sonic-eventd/debian/changelog create mode 100644 src/sonic-eventd/debian/compat create mode 100644 src/sonic-eventd/debian/control create mode 100755 src/sonic-eventd/debian/rules diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk index f21565f78f5d..304f295e2a4b 100644 --- a/rules/docker-eventd.mk +++ b/rules/docker-eventd.mk @@ -4,10 +4,10 @@ DOCKER_EVENTD_STEM = docker-eventd DOCKER_EVENTD = $(DOCKER_EVENTD_STEM).gz DOCKER_EVENTD_DBG = $(DOCKER_EVENTD_STEM)-$(DBG_IMAGE_MARK).gz -$(DOCKER_EVENTD)_DEPENDS += $(EVENTD) +$(DOCKER_EVENTD)_DEPENDS += $(SONIC_EVENTD) $(DOCKER_EVENTD)_DBG_DEPENDS = 
$($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS) -$(DOCKER_EVENTD)_DBG_DEPENDS += $(EVENTD_DBG) $(LIBSWSSCOMMON_DBG) +$(DOCKER_EVENTD)_DBG_DEPENDS += $(SONIC_EVENTD_DBG) $(LIBSWSSCOMMON_DBG) $(DOCKER_EVENTD)_DBG_IMAGE_PACKAGES = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_IMAGE_PACKAGES) diff --git a/rules/eventd.dep b/rules/eventd.dep index eec3563d8917..12f32a30f2c7 100644 --- a/rules/eventd.dep +++ b/rules/eventd.dep @@ -1,12 +1,10 @@ -SPATH := $($(EVENTD)_SRC_PATH) +SPATH := $($(SONIC_EVENTD)_SRC_PATH) DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/eventd.mk rules/eventd.dep DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) -SMDEP_FILES := $(addprefix $(SPATH)/,$(shell cd $(SPATH) && git ls-files)) +DEP_FILES := $(addprefix $(SPATH)/,$(shell cd $(SPATH) && git ls-files)) -$(EVENTD)_CACHE_MODE := GIT_CONTENT_SHA -$(EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) -$(EVENTD)_DEP_FILES := $(DEP_FILES) -$(EVENTD)_SMDEP_FILES := $(SMDEP_FILES) -$(EVENTD)_SMDEP_PATHS := $(SPATH) +$(SONIC_EVENTD)_CACHE_MODE := GIT_CONTENT_SHA +$(SONIC_EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) +$(SONIC_EVENTD)_DEP_FILES := $(DEP_FILES) diff --git a/rules/eventd.mk b/rules/eventd.mk index 367f34e26d3a..9eea21a4cfb5 100644 --- a/rules/eventd.mk +++ b/rules/eventd.mk @@ -1,16 +1,16 @@ # eventd package -EVENTD = eventd.0.0_$(CONFIGURED_ARCH).deb -$(EVENTD)_SRC_PATH = $(SRC_PATH)/sonic-eventd -$(EVENTD)_DEPENDS += $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) +SONIC_EVENTD_VERSION = 1.0.0-0 +SONIC_EVENTD_PKG_NAME = eventd -$(EVENTD)_RDEPENDS += $(LIBSWSSCOMMON) -SONIC_DPKG_DEBS += $(EVENTD) +SONIC_EVENTD = sonic-$(SONIC_EVENTD_PKG_NAME)_$(SONIC_EVENTD_VERSION)_$(CONFIGURED_ARCH).deb +$(SONIC_EVENTD)_SRC_PATH = $(SRC_PATH)/sonic-eventd +$(SONIC_EVENTD)_DEPENDS += $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) -EVENTD_DBG = eventd-dbg_1.0.0_$(CONFIGURED_ARCH).deb -$(EVENTD_DBG)_DEPENDS += $(EVENTD) -$(EVENTD_DBG)_RDEPENDS += $(EVENTD) -$(eval $(call add_derived_package,$(EVENTD),$(EVENTD_DBG))) +SONIC_DPKG_DEBS += $(SONIC_EVENTD) + +SONIC_EVENTD_DBG = sonic-$(SONIC_EVENTD_PKG_NAME)-dbgsym_$(SONIC_EVENTD_VERSION)_$(CONFIGURED_ARCH).deb +$(eval $(call add_derived_package,$(SONIC_EVENTD),$(SONIC_EVENTD_DBG))) # The .c, .cpp, .h & .hpp files under src/{$DBG_SRC_ARCHIVE list} # are archived into debug one image to facilitate debugging. diff --git a/src/sonic-eventd/debian/changelog b/src/sonic-eventd/debian/changelog new file mode 100644 index 000000000000..eba3bf10ea53 --- /dev/null +++ b/src/sonic-eventd/debian/changelog @@ -0,0 +1,5 @@ +sonic-eventd (1.0.0-0) UNRELEASED; urgency=medium + + * Initial release. 
+ +-- Renuka Manavalan diff --git a/src/sonic-eventd/debian/compat b/src/sonic-eventd/debian/compat new file mode 100644 index 000000000000..48082f72f087 --- /dev/null +++ b/src/sonic-eventd/debian/compat @@ -0,0 +1 @@ +12 diff --git a/src/sonic-eventd/debian/control b/src/sonic-eventd/debian/control new file mode 100644 index 000000000000..8611b8f1ac81 --- /dev/null +++ b/src/sonic-eventd/debian/control @@ -0,0 +1,14 @@ +Source: sonic-eventd +Section: devel +Priority: optional +Maintainer: Kelly Yeh +Build-Depends: debhelper (>= 12.0.0), libevent-dev, libboost-thread-dev, libboost-system-dev, libswsscommon-dev +Standards-Version: 3.9.3 +Homepage: https://github.com/Azure/sonic-buildimage +XS-Go-Import-Path: github.com/Azure/sonic-buildimage + +Package: sonic-eventd +Architecture: any +Built-Using: ${misc:Built-Using} +Depends: ${shlibs:Depends} +Description: SONiC event service diff --git a/src/sonic-eventd/debian/rules b/src/sonic-eventd/debian/rules new file mode 100755 index 000000000000..ac2cd63889ef --- /dev/null +++ b/src/sonic-eventd/debian/rules @@ -0,0 +1,6 @@ +#!/usr/bin/make -f + +export DEB_BUILD_MAINT_OPTIONS=hardening=+all + +%: + dh $@ --parallel From bfa0b2c20ac34392f4b43398bedabeb28e2833e5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 7 Jun 2022 14:47:09 +0000 Subject: [PATCH 12/66] updates per review --- .../yang-events/sonic-events-bgp.yang | 37 +++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 1406a8b24047..906ebd91f248 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -28,8 +28,8 @@ module sonic-events-bgp { oc-alarm-types:MINOR description " - Declares an event for BGP state for a neighbor IP - The status says up or down"; + Declares an event for BGP state for a neighbor IP going up. + Going down is reported via notification; leaf ip { type inet:ip-address; @@ -39,7 +39,6 @@ module sonic-events-bgp { leaf status { type enumeration { enum "up"; - enum "down"; } description "Provides the status as up (true) or down (false)"; } @@ -47,13 +46,37 @@ module sonic-events-bgp { uses evtcmn:sonic-events-cmn; } - container bgp-hold-timer { + container notification { oc-alarm-types:MAJOR description " - Declares an event for BGP hold timer expiry. - This event does not have any other parameter. - Hence source + tag identifies an event"; + Reports an notification. + The error codes as per IANA. 
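+            Major and minor error codes follow the IANA-registered values
+            from [RFC4271][RFC7313], as noted on the individual leaves.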
+ The other params are as in the message"; + + leaf major-code { + type uint8; + description "Major IANA error code; [RFC4271][RFC7313]"; + } + + leaf minor-code { + type uint8; + description "Minor IANA error code; [RFC4271][RFC7313]"; + } + + leaf ip { + type inet:ip-address; + description "IP of neighbor associated with this notification"; + } + + leaf ASN { + type uint32; + description "ASN number from the notification"; + } + + leaf is-sent { + type boolean; + description "true - if this notification was for sent messages; false if it was for received."; uses evtcmn:sonic-events-cmn; } From c903f024353c1dbb797f96e653de72ed518b38ad Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 7 Jun 2022 17:34:39 +0000 Subject: [PATCH 13/66] build in progress --- src/sonic-eventd/Makefile | 41 ++++++++++++ src/sonic-eventd/Makefile.am | 2 - src/sonic-eventd/eventd/Makefile.am | 17 ----- src/sonic-eventd/{eventd => src}/eventd.cpp | 72 +++++++++++---------- src/sonic-eventd/{eventd => src}/eventd.h | 10 +-- src/sonic-eventd/src/subdir.mk | 12 ++++ 6 files changed, 95 insertions(+), 59 deletions(-) create mode 100644 src/sonic-eventd/Makefile delete mode 100644 src/sonic-eventd/Makefile.am delete mode 100644 src/sonic-eventd/eventd/Makefile.am rename src/sonic-eventd/{eventd => src}/eventd.cpp (86%) rename src/sonic-eventd/{eventd => src}/eventd.h (94%) create mode 100644 src/sonic-eventd/src/subdir.mk diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile new file mode 100644 index 000000000000..1fa19724b08b --- /dev/null +++ b/src/sonic-eventd/Makefile @@ -0,0 +1,41 @@ +RM := rm -rf +EVENTD_TARGET := eventd +CP := cp +MKDIR := mkdir +CC := g++ +MV := mv +LIBS := -levent -lhiredis -lswsscommon -pthread -lboost_thread -lboost_system +CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common +PWD := $(shell pwd) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(strip $(C_DEPS)),) +-include $(C_DEPS) $(OBJS) +endif +endif + +-include src/subdir.mk + +all: sonic-eventd + +sonic-eventd: $(OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TARGET) $(OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +install: + $(MKDIR) -p $(DESTDIR)/usr/sbin + $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + +deinstall: + $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) + $(RM) -rf $(DESTDIR)/usr/sbin + +clean: + -@echo ' ' + +.PHONY: all clean dependents + + diff --git a/src/sonic-eventd/Makefile.am b/src/sonic-eventd/Makefile.am deleted file mode 100644 index 095f0f81f07e..000000000000 --- a/src/sonic-eventd/Makefile.am +++ /dev/null @@ -1,2 +0,0 @@ -SUBDIRS = eventd - diff --git a/src/sonic-eventd/eventd/Makefile.am b/src/sonic-eventd/eventd/Makefile.am deleted file mode 100644 index d239648b216b..000000000000 --- a/src/sonic-eventd/eventd/Makefile.am +++ /dev/null @@ -1,17 +0,0 @@ -INCLUDES = -I $(top_srcdir)/lib \ - -I $(top_srcdir) - -bin_PROGRAMS = eventd - -if DEBUG -DBGFLAGS = -ggdb -DDEBUG -else -DBGFLAGS = -g -DNDEBUG -endif - -eventd_sources = eventd.cpp - -eventd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -eventd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -eventd_LDADD = -lpthread -lswsscommon -lzmq -lboost_serialization -luuid - diff --git a/src/sonic-eventd/eventd/eventd.cpp b/src/sonic-eventd/src/eventd.cpp similarity index 86% rename from src/sonic-eventd/eventd/eventd.cpp rename to src/sonic-eventd/src/eventd.cpp index c798f4f5bf4b..392cd62cca4a 100644 --- a/src/sonic-eventd/eventd/eventd.cpp +++ 
b/src/sonic-eventd/src/eventd.cpp @@ -21,29 +21,31 @@ /* Count of elements returned in each read */ #define READ_SET_SIZE 100 +#define VEC_SIZE(p) ((int)p.size()) + int eventd_proxy::init() { - int ret = -1; + int ret = -1, rc = 0; SWSS_LOG_INFO("Start xpub/xsub proxy"); m_frontend = zmq_socket(m_ctx, ZMQ_XSUB); RET_ON_ERR(m_frontend != NULL, "failing to get ZMQ_XSUB socket"); - int rc = zmq_bind(m_frontend, get_config(XSUB_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind XSUB to %s", get_config(XSUB_END_KEY)); + rc = zmq_bind(m_frontend, get_config(string(XSUB_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind XSUB to %s", get_config(string(XSUB_END_KEY)).c_str()); m_backend = zmq_socket(m_ctx, ZMQ_XPUB); RET_ON_ERR(m_backend != NULL, "failing to get ZMQ_XPUB socket"); - rc = zmq_bind(m_backend, get_config(XPUB_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(XPUB_END_KEY)); + rc = zmq_bind(m_backend, get_config(string(XPUB_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(string(XPUB_END_KEY)).c_str()); m_capture = zmq_socket(m_ctx, ZMQ_PUB); RET_ON_ERR(m_capture != NULL, "failing to get ZMQ_PUB socket for capture"); - rc = zmq_bind(m_capture, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config(CAPTURE_END_KEY)); + rc = zmq_bind(m_capture, get_config(string(CAPTURE_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config(string(CAPTURE_END_KEY)).c_str()); m_thr = thread(&eventd_proxy::run, this); ret = 0; @@ -58,8 +60,6 @@ eventd_proxy::run() /* runs forever until zmq context is terminated */ zmq_proxy(m_frontend, m_backend, m_capture); - - return 0; } @@ -68,6 +68,7 @@ capture_service::~capture_service() stop_capture(); } +void capture_service::stop_capture() { if (m_socket != NULL) { @@ -80,7 +81,7 @@ capture_service::stop_capture() } static bool -validate_event(const internal_event_t &event, string &rid, string &seq) +validate_event(const internal_event_t &event, runtime_id_t &rid, sequence_t &seq) { bool ret = false; @@ -90,13 +91,12 @@ validate_event(const internal_event_t &event, string &rid, string &seq) itc_e = event.find(EVENT_STR_DATA); if ((itc_r != event.end()) && (itc_s != event.end()) && (itc_e != event.end())) { - invalid_evt = true; - + ret = true; rid = itc_r->second; - seq = itc_s->second; + seq = str_to_seq(itc_s->second); } else { - SWSS_LOG_ERROR("Invalid evt: %s", map_to_str(event).str()); + SWSS_LOG_ERROR("Invalid evt: %s", map_to_str(event).c_str()); } return ret; @@ -104,12 +104,12 @@ validate_event(const internal_event_t &event, string &rid, string &seq) void -capture_service::init_capture_cache(events_data_lst_t &lst) +capture_service::init_capture_cache(const events_data_lst_t &lst) { /* clean any pre-existing cache */ - int ret = -1; int i; - string rid, seq; + runtime_id_t rid; + sequence_t seq; /* * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only @@ -121,13 +121,14 @@ capture_service::init_capture_cache(events_data_lst_t &lst) m_last_events[to_string(i)] = ""; } - /* Cache last - for (events_data_lst_t::it = lst.begin(); it != lst.end(); ++it) { + /* Cache last events -- as only the last instance */ + /* This is required to compute missed count */ + for (events_data_lst_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { internal_event_t event; if (deserialize(*itc, event) == 0) { if (validate_event(event, rid, seq)) { - m_pre_exist_id[event[EVENT_RUNTIME_ID]] = rid; + 
m_pre_exist_id[rid] = seq; m_events.push_back(*itc); } } @@ -142,12 +143,11 @@ void capture_service::do_capture() { /* clean any pre-existing cache */ - int ret = -1; - int i; - string rid, seq; + runtime_id_t rid; + sequence_t seq; /* Check read events against provided cache for 2 seconds to skip */ - chrono::steady_clock::timepoint start = chrono::steady_clock::now(); + chrono::steady_clock::time_point start = chrono::steady_clock::now(); while(!m_pre_exist_id.empty()) { internal_event_t event; string source, evt_str; @@ -158,10 +158,9 @@ capture_service::do_capture() if (validate_event(event, rid, seq)) { serialize(event, evt_str); - m_pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); + pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); if (it != m_pre_exist_id.end()) { - seq = events_base::str_to_seq(seq); if (seq > it->second) { m_events.push_back(evt_str); } @@ -176,7 +175,7 @@ capture_service::do_capture() pre_exist_id_t().swap(m_pre_exist_id); /* Save until max allowed */ - while(m_events.size() < m_cache_max) { + while(VEC_SIZE(m_events) < m_cache_max) { internal_event_t event; string source, evt_str; @@ -193,7 +192,7 @@ capture_service::do_capture() stringstream ss; ss << e.what(); SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", - ss.str().c_str(), m_events.size()); + ss.str().c_str(), VEC_SIZE(m_events)); break; } } @@ -218,13 +217,13 @@ capture_service::do_capture() * Capture stop will close the socket which fail the read * and hence bail out. */ - return 0; } int capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) { int ret = -1; + int rc; /* Can go in single step only. */ RET_ON_ERR((ctrl - m_ctrl) == 1, "m_ctrl(%d) > ctrl(%d)", m_ctrl, ctrl); @@ -237,8 +236,8 @@ capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) sock = zmq_socket(m_ctx, ZMQ_SUB); RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); - rc = zmq_connect(sock, get_config(CAPTURE_END_KEY)); - RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(CAPTURE_END_KEY)); + rc = zmq_connect(sock, get_config(string(CAPTURE_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(string(CAPTURE_END_KEY)).c_str()); rc = zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0); RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); @@ -303,6 +302,7 @@ capture_service::read_cache(events_data_lst_t &lst_fifo, void run_eventd_service() { + int cache_max; event_service service; eventd_proxy *proxy = NULL; capture_service *capture = NULL; @@ -315,7 +315,7 @@ run_eventd_service() void *zctx = zmq_ctx_new(); RET_ON_ERR(ctx != NULL, "Failed to get zmq ctx"); - cache_max = get_config_data(CACHE_MAX_CNT, (uint32_t)MAX_CACHE_SIZE); + cache_max = get_config_data(string(CACHE_MAX_CNT), (int)MAX_CACHE_SIZE)); RET_ON_ERR(cache_max > 0, "Failed to get CACHE_MAX_CNT"); proxy = new eventd_proxy(zctx); @@ -389,20 +389,22 @@ run_eventd_service() last_events_t().swap(capture_last_events); } - int sz = capture_fifo_events.size() < READ_SET_SIZE ? - capture_fifo_events.size() : READ_SET_SIZE; + { + int sz = VEC_SIZE(capture_fifo_events) < READ_SET_SIZE ? 
+ VEC_SIZE(capture_fifo_events) : READ_SET_SIZE; if (sz != 0) { auto it = std::next(capture_fifo_events.begin(), sz); move(capture_fifo_events.begin(), capture_fifo_events.end(), back_inserter(resp_data)); - if (sz == capture_fifo_events.size()) { + if (sz == VEC_SIZE(capture_fifo_events)) { events_data_lst_t().swap(capture_fifo_events); } else { events.erase(capture_fifo_events.begin(), it); } } + } break; diff --git a/src/sonic-eventd/eventd/eventd.h b/src/sonic-eventd/src/eventd.h similarity index 94% rename from src/sonic-eventd/eventd/eventd.h rename to src/sonic-eventd/src/eventd.h index 3b20ad344916..0b71f2628234 100644 --- a/src/sonic-eventd/eventd/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -24,14 +24,14 @@ class eventd_proxy zmq_close(m_backend); zmq_close(m_capture); - if (m_thr.joinable) + if (m_thr.joinable()) m_thr.join(); } int init(); private: - int run(); + void run(); void *m_ctx; void *m_frontend; @@ -96,7 +96,7 @@ class capture_service { public: capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_socket(NULL), - m_ctl(NEED_INIT), m_status(0), m_cache_max(cache_max) + m_ctrl(NEED_INIT), m_cache_max(cache_max) {} ~capture_service(); @@ -107,7 +107,7 @@ class capture_service last_events_t &lst_last); private: - void init_capture_cache(events_data_lst_t &lst); + void init_capture_cache(const events_data_lst_t &lst); void do_capture(); void stop_capture(); @@ -117,7 +117,7 @@ class capture_service capture_control_t m_ctrl; thread m_thr; - uint32_t m_cache_max; + int m_cache_max; events_data_lst_t m_events; diff --git a/src/sonic-eventd/src/subdir.mk b/src/sonic-eventd/src/subdir.mk new file mode 100644 index 000000000000..6c2ca1f81f3f --- /dev/null +++ b/src/sonic-eventd/src/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +OBJS += ./src/eventd.o + +C_DEPS += ./src/eventd.d + +src/%.o: src/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst src/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' From 091ced5739b6bcfe51db9adda39279292911e956 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 7 Jun 2022 17:42:25 +0000 Subject: [PATCH 14/66] per comments --- src/sonic-yang-models/yang-events/sonic-events-bgp.yang | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 906ebd91f248..17fa64c82fbd 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -28,8 +28,7 @@ module sonic-events-bgp { oc-alarm-types:MINOR description " - Declares an event for BGP state for a neighbor IP going up. 
- Going down is reported via notification; + Declares an event for BGP state for a neighbor IP going up/down.; leaf ip { type inet:ip-address; @@ -39,6 +38,8 @@ module sonic-events-bgp { leaf status { type enumeration { enum "up"; + enum "down"; + } description "Provides the status as up (true) or down (false)"; } From 78e517642089073932beb3ac4dd98d93d88a7de2 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 8 Jun 2022 00:36:42 +0000 Subject: [PATCH 15/66] test code written --- src/sonic-eventd/Makefile | 15 +++- src/sonic-eventd/src/eventd.cpp | 31 +++---- src/sonic-eventd/src/eventd.h | 2 - src/sonic-eventd/src/main.cpp | 14 ++++ src/sonic-eventd/src/subdir.mk | 5 +- src/sonic-eventd/tests/eventd_ut.cpp | 117 +++++++++++++++++++++++++++ src/sonic-eventd/tests/main.cpp | 10 +++ src/sonic-eventd/tests/subdir.mk | 12 +++ 8 files changed, 179 insertions(+), 27 deletions(-) create mode 100644 src/sonic-eventd/src/main.cpp create mode 100644 src/sonic-eventd/tests/eventd_ut.cpp create mode 100644 src/sonic-eventd/tests/main.cpp create mode 100644 src/sonic-eventd/tests/subdir.mk diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index 1fa19724b08b..3fe5a71b3f40 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -1,10 +1,13 @@ RM := rm -rf EVENTD_TARGET := eventd +EVENTD_TEST := tests/tests CP := cp MKDIR := mkdir CC := g++ MV := mv -LIBS := -levent -lhiredis -lswsscommon -pthread -lboost_thread -lboost_system +LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid +TEST_LIBS := -L/usr/src/gtest -lgtest -lgtest_main -lgmock -lgmock_main + CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common PWD := $(shell pwd) @@ -15,8 +18,9 @@ endif endif -include src/subdir.mk +-include tests/subdir.mk -all: sonic-eventd +all: sonic-eventd eventd-tests sonic-eventd: $(OBJS) @echo 'Building target: $@' @@ -25,6 +29,13 @@ sonic-eventd: $(OBJS) @echo 'Finished building target: $@' @echo ' ' +eventd-tests: $(TEST_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) + @echo 'Finished building target: $@' + @echo ' ' + install: $(MKDIR) -p $(DESTDIR)/usr/sbin $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 392cd62cca4a..f183ac8b4dea 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -217,8 +217,10 @@ capture_service::do_capture() * Capture stop will close the socket which fail the read * and hence bail out. */ + return; } + int capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) { @@ -255,7 +257,7 @@ capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) * if overall mem consumption is too high. Clearing the map just before use * is likely to help. 
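+         * (The loop below pre-fills m_last_events with MAX_PUBLISHERS_COUNT
+         *  placeholder keys for exactly this reason.)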
*/ - for (i=0; i 0, "Failed to get CACHE_MAX_CNT"); proxy = new eventd_proxy(zctx); @@ -329,7 +331,7 @@ run_eventd_service() int code, resp = -1; events_data_lst_t req_data, resp_data; - RET_ON_ERR(channel_read(code, data) == 0, + RET_ON_ERR(service.channel_read(code, req_data) == 0, "Failed to read request"); switch(code) { @@ -354,7 +356,7 @@ run_eventd_service() resp = -1; break; } - resp = capture->set_control(START_CAPTURE, req_data); + resp = capture->set_control(START_CAPTURE, &req_data); break; @@ -401,7 +403,7 @@ run_eventd_service() if (sz == VEC_SIZE(capture_fifo_events)) { events_data_lst_t().swap(capture_fifo_events); } else { - events.erase(capture_fifo_events.begin(), it); + capture_fifo_events.erase(capture_fifo_events.begin(), it); } } } @@ -417,11 +419,11 @@ run_eventd_service() assert(false); break; } - RET_ON_ERR(channel_write(resp_code, resp_data) == 0, + RET_ON_ERR(service.channel_write(resp, resp_data) == 0, "Failed to write response back"); } out: - m_service.close(); + service.close_service(); if (proxy != NULL) { delete proxy; } @@ -434,16 +436,3 @@ run_eventd_service() SWSS_LOG_ERROR("Eventd service exiting\n"); } - -int main() -{ - SWSS_LOG_INFO("The eventd service started"); - - run_eventd_service(); - - SWSS_LOG_INFO("The eventd service exited"); - - return 0; -} - - diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h index 0b71f2628234..753615699b8f 100644 --- a/src/sonic-eventd/src/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -90,8 +90,6 @@ typedef enum { } capture_control_t; -int capture_status; - class capture_service { public: diff --git a/src/sonic-eventd/src/main.cpp b/src/sonic-eventd/src/main.cpp new file mode 100644 index 000000000000..67026b070182 --- /dev/null +++ b/src/sonic-eventd/src/main.cpp @@ -0,0 +1,14 @@ + +void run_eventd_service(); + +int main() +{ + SWSS_LOG_INFO("The eventd service started"); + + run_eventd_service(); + + SWSS_LOG_INFO("The eventd service exited"); + + return 0; +} + diff --git a/src/sonic-eventd/src/subdir.mk b/src/sonic-eventd/src/subdir.mk index 6c2ca1f81f3f..a1e2b55f8d13 100644 --- a/src/sonic-eventd/src/subdir.mk +++ b/src/sonic-eventd/src/subdir.mk @@ -1,8 +1,9 @@ CC := g++ -OBJS += ./src/eventd.o +TEST_OBJS += ./src/eventd.o +OBJS += ./src/eventd.o ./src/main.o -C_DEPS += ./src/eventd.d +C_DEPS += ./src/eventd.d ./src/main.d src/%.o: src/%.cpp @echo 'Building file: $<' diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp new file mode 100644 index 000000000000..9be6369b8468 --- /dev/null +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -0,0 +1,117 @@ +#include +#include +#include +#include +#include +#include +#include +#include "gtest/gtest.h" +#include "events_common.h" +#include "events.h" +#include "../src/eventd.h" + +using namespace std; + +typedef vector lst_events_t; + +void run_sub(void *zctx, bool &term, string &read_source, lst_events_t &lst) +{ + void *mock_sub = zmq_socket (zctx, ZMQ_SUB); + string source; + internal_event_t ev_int; + int block_ms = 200; + + EXPECT_TRUE(NULL != mock_sub); + EXPECT_EQ(0, zmq_connect(mock_sub, get_config(XPUB_END_KEY).c_str())); + EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_SUBSCRIBE, "", 0)); + EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); + + while(!term) { + if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { + lst.push_back(ev_int); + read_source.swap(source); + } + } + + zmq_close(mock_sub); +} + +void *init_pub(void *zctx) +{ + void *mock_pub = zmq_socket (zctx, 
ZMQ_PUB); + EXPECT_TRUE(NULL != mock_pub); + EXPECT_EQ(0, zmq_connect(mock_pub, get_config(XSUB_END_KEY).c_str())); + + return mock_pub; +} + +void run_pub(void *mock_pub, const string wr_source, lst_events_t &lst) +{ + for(lst_events_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { + EXPECT_EQ(0, zmq_message_send(mock_pub, wr_source, *itc)); + } +} + + +static internal_event_t +create_ev(const string rid, sequence_t n, const string d) +{ + stringstream ss; + + ss << d << ":" << n; + + return internal_event_t({ {EVENT_STR_DATA, ss.str()}, + { EVENT_RUNTIME_ID, rid }, { EVENT_SEQUENCE, seq_to_str(n) }}); +} + + + +TEST(eventd, proxy) +{ + printf("TEST started\n"); + bool term_sub = false; + string rd_source, wr_source("hello"); + lst_events_t rd_evts, wr_evts; + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + /* subscriber in a thread */ + thread thr(&run_sub, zctx, ref(term_sub), ref(rd_source), ref(rd_evts)); + + /* Init pub connection */ + void *mock_pub = init_pub(zctx); + + /* Provide time for async connect to complete */ + this_thread::sleep_for(chrono::milliseconds(100)); + + for(int i=0; i<5; ++i) { + wr_evts.push_back(create_ev("hello", i, "test body")); + } + + EXPECT_TRUE(rd_evts.empty()); + EXPECT_TRUE(rd_source.empty()); + + /* Publish events. */ + run_pub(mock_pub, wr_source, wr_evts); + + while(rd_evts.size() != wr_evts.size()) { + printf("rd_evts.size != wr_evts.size %d != %d\n", + (int)rd_evts.size(), (int)wr_evts.size()); + this_thread::sleep_for(chrono::milliseconds(10)); + } + + term_sub = true; + printf("Waiting for sub thread to join...\n"); + + thr.join(); + zmq_close(mock_pub); + zmq_ctx_term(zctx); +} + diff --git a/src/sonic-eventd/tests/main.cpp b/src/sonic-eventd/tests/main.cpp new file mode 100644 index 000000000000..f803fbc39d5a --- /dev/null +++ b/src/sonic-eventd/tests/main.cpp @@ -0,0 +1,10 @@ +#include "gtest/gtest.h" +#include + +using namespace std; + +int main(int argc, char* argv[]) +{ + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/src/sonic-eventd/tests/subdir.mk b/src/sonic-eventd/tests/subdir.mk new file mode 100644 index 000000000000..a29131c57000 --- /dev/null +++ b/src/sonic-eventd/tests/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +TEST_OBJS += ./tests/eventd_ut.o ./tests/main.o + +C_DEPS += ./tests/eventd_ut.d ./tests/main.d + +tests/%.o: tests/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst src/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' From c6dad25a01f608bd8f278f9d61336307e0ad4117 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 9 Jun 2022 04:43:30 +0000 Subject: [PATCH 16/66] Adding test code --- src/sonic-eventd/src/eventd.cpp | 141 +++++++------ src/sonic-eventd/src/eventd.h | 14 +- src/sonic-eventd/src/main.cpp | 1 + src/sonic-eventd/tests/eventd_ut.cpp | 292 +++++++++++++++++++++++++-- 4 files changed, 353 insertions(+), 95 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index f183ac8b4dea..6099b9d8ef09 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -23,6 +23,8 @@ #define VEC_SIZE(p) ((int)p.size()) +extern int zerrno; + int eventd_proxy::init() { @@ -60,6 +62,8 @@ eventd_proxy::run() /* runs forever until zmq context 
is terminated */ zmq_proxy(m_frontend, m_backend, m_capture); + + SWSS_LOG_INFO("Stopped xpub/xsub proxy"); } @@ -71,10 +75,8 @@ capture_service::~capture_service() void capture_service::stop_capture() { - if (m_socket != NULL) { - zmq_close(m_socket); - m_socket = NULL; - } + m_ctrl = STOP_CAPTURE; + if (m_thr.joinable()) { m_thr.join(); } @@ -104,26 +106,18 @@ validate_event(const internal_event_t &event, runtime_id_t &rid, sequence_t &seq void -capture_service::init_capture_cache(const events_data_lst_t &lst) +capture_service::init_capture_cache(const event_serialized_lst_t &lst) { /* clean any pre-existing cache */ - int i; runtime_id_t rid; sequence_t seq; - /* - * Reserve a MAX_PUBLISHERS_COUNT entries for last events, as we use it only - * upon m_events/vector overflow, which might block adding new entries in map - * if overall mem consumption is too high. Clearing the map just before use - * is likely to help. + /* Cache given events as initial stock. + * Save runtime ID with last seen seq to avoid duplicates, while reading + * from capture socket. + * No check for max cache size here, as most likely not needed. */ - for (i=0; i ctrl(%d)", m_ctrl, ctrl); - m_ctrl = ctrl; + RET_ON_ERR((ctrl - m_ctrl) == 1, "m_ctrl(%d)+1 < ctrl(%d)", m_ctrl, ctrl); - switch(m_ctrl) { + switch(ctrl) { case INIT_CAPTURE: - { - void *sock = NULL; - sock = zmq_socket(m_ctx, ZMQ_SUB); - RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); - - rc = zmq_connect(sock, get_config(string(CAPTURE_END_KEY)).c_str()); - RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(string(CAPTURE_END_KEY)).c_str()); - - rc = zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0); - RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); - - m_socket = sock; + m_thr = thread(&capture_service::do_capture, this); + for(int i=0; !m_cap_run && (i < 100); ++i) { + /* Wait max a second for thread to init */ + this_thread::sleep_for(chrono::milliseconds(10)); } + RET_ON_ERR(m_cap_run, "Failed to init capture"); + m_ctrl = ctrl; ret = 0; break; @@ -264,9 +274,7 @@ capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) if ((lst != NULL) && (!lst->empty())) { init_capture_cache(*lst); } - - m_thr = thread(&capture_service::do_capture, this); - RET_ON_ERR(m_thr.joinable(), "Capture thread not running"); + m_ctrl = ctrl; ret = 0; break; @@ -279,10 +287,11 @@ capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) */ this_thread::sleep_for(chrono::milliseconds(CACHE_DRAIN_IN_MILLISECS)); stop_capture(); + ret = 0; break; default: - SWSS_LOG_ERROR("Unexpected code=%d", m_ctrl); + SWSS_LOG_ERROR("Unexpected code=%d", ctrl); break; } out: @@ -290,13 +299,13 @@ capture_service::set_control(capture_control_t ctrl, events_data_lst_t *lst) } int -capture_service::read_cache(events_data_lst_t &lst_fifo, +capture_service::read_cache(event_serialized_lst_t &lst_fifo, last_events_t &lst_last) { lst_fifo.swap(m_events); lst_last.swap(m_last_events); last_events_t().swap(m_last_events); - events_data_lst_t().swap(m_events); + event_serialized_lst_t().swap(m_events); return 0; } @@ -309,7 +318,7 @@ run_eventd_service() eventd_proxy *proxy = NULL; capture_service *capture = NULL; - events_data_lst_t capture_fifo_events; + event_serialized_lst_t capture_fifo_events; last_events_t capture_last_events; SWSS_LOG_ERROR("Eventd service starting\n"); @@ -329,7 +338,7 @@ run_eventd_service() while(true) { int code, resp = -1; - events_data_lst_t req_data, resp_data; + event_serialized_lst_t req_data, resp_data; 
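+            /*
+             * One request per iteration: channel_read() returns the command code
+             * (EVENT_CACHE_INIT/START/STOP/READ or EVENT_ECHO) plus any payload,
+             * the switch below fills resp/resp_data, and channel_write() sends the
+             * status and data back to the requester.
+             */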
RET_ON_ERR(service.channel_read(code, req_data) == 0, "Failed to read request"); @@ -340,7 +349,7 @@ run_eventd_service() if (capture != NULL) { delete capture; } - events_data_lst_t().swap(capture_fifo_events); + event_serialized_lst_t().swap(capture_fifo_events); last_events_t().swap(capture_last_events); capture = new capture_service(zctx, cache_max); @@ -401,7 +410,7 @@ run_eventd_service() back_inserter(resp_data)); if (sz == VEC_SIZE(capture_fifo_events)) { - events_data_lst_t().swap(capture_fifo_events); + event_serialized_lst_t().swap(capture_fifo_events); } else { capture_fifo_events.erase(capture_fifo_events.begin(), it); } diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h index 753615699b8f..263e504af29f 100644 --- a/src/sonic-eventd/src/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -3,7 +3,7 @@ */ #include "events_service.h" -typedef map last_events_t; +typedef map last_events_t; /* * Started by eventd_service. @@ -93,31 +93,31 @@ typedef enum { class capture_service { public: - capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_socket(NULL), + capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_cap_run(false), m_ctrl(NEED_INIT), m_cache_max(cache_max) {} ~capture_service(); - int set_control(capture_control_t ctrl, events_data_lst_t *p=NULL); + int set_control(capture_control_t ctrl, event_serialized_lst_t *p=NULL); - int read_cache(events_data_lst_t &lst_fifo, + int read_cache(event_serialized_lst_t &lst_fifo, last_events_t &lst_last); private: - void init_capture_cache(const events_data_lst_t &lst); + void init_capture_cache(const event_serialized_lst_t &lst); void do_capture(); void stop_capture(); void *m_ctx; - void *m_socket; + bool m_cap_run; capture_control_t m_ctrl; thread m_thr; int m_cache_max; - events_data_lst_t m_events; + event_serialized_lst_t m_events; last_events_t m_last_events; diff --git a/src/sonic-eventd/src/main.cpp b/src/sonic-eventd/src/main.cpp index 67026b070182..fd14e64bc81e 100644 --- a/src/sonic-eventd/src/main.cpp +++ b/src/sonic-eventd/src/main.cpp @@ -1,3 +1,4 @@ +#include "logger.h" void run_eventd_service(); diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 9be6369b8468..258f0a74d5f2 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -12,9 +12,152 @@ using namespace std; -typedef vector lst_events_t; +#define ARRAY_SIZE(p) ((int)(sizeof(p) / sizeof((p)[0]))) -void run_sub(void *zctx, bool &term, string &read_source, lst_events_t &lst) +typedef struct { + int id; + string source; + string tag; + string rid; + string seq; + event_params_t params; + int missed_cnt; +} test_data_t; + +internal_event_t create_ev(const test_data_t &data) +{ + internal_event_t event_data; + + { + string param_str; + + EXPECT_EQ(0, serialize(data.params, param_str)); + + map_str_str_t event_str_map = { { data.source + ":" + data.tag, param_str}}; + + EXPECT_EQ(0, serialize(event_str_map, event_data[EVENT_STR_DATA])); + } + + event_data[EVENT_RUNTIME_ID] = data.rid; + event_data[EVENT_SEQUENCE] = data.seq; + + return event_data; +} + +/* Mock test data with event parameters and expected missed count */ +static const test_data_t ldata[] = { + { + 0, + "source0", + "tag0", + "guid-0", + "1", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 0 + }, + { + 1, + "source0", + "tag1", + "guid-1", + "100", + {{"ip", "10.10.27.10"}, {"state", "down"}}, + 0 + }, + { + 2, + "source1", + "tag2", + "guid-2", + "101", + {{"ip", "10.10.24.10"}, {"state", 
"down"}}, + 0 + }, + { + 3, + "source0", + "tag3", + "guid-1", + "105", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 4 + }, + { + 4, + "source0", + "tag4", + "guid-0", + "2", + {{"ip", "10.10.20.10"}, {"state", "down"}}, + 0 + }, + { + 5, + "source1", + "tag5", + "guid-2", + "110", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 8 + }, + { + 6, + "source0", + "tag0", + "guid-0", + "5", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 2 + }, + { + 7, + "source0", + "tag1", + "guid-1", + "106", + {{"ip", "10.10.27.10"}, {"state", "down"}}, + 0 + }, + { + 8, + "source1", + "tag2", + "guid-2", + "111", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 0 + }, + { + 9, + "source0", + "tag3", + "guid-1", + "109", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 2 + }, + { + 10, + "source0", + "tag4", + "guid-0", + "6", + {{"ip", "10.10.20.10"}, {"state", "down"}}, + 0 + }, + { + 11, + "source1", + "tag5", + "guid-2", + "119", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 7 + }, +}; + + +void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t &lst) { void *mock_sub = zmq_socket (zctx, ZMQ_SUB); string source; @@ -34,6 +177,7 @@ void run_sub(void *zctx, bool &term, string &read_source, lst_events_t &lst) } zmq_close(mock_sub); + printf("run_sub exited\n"); } void *init_pub(void *zctx) @@ -45,33 +189,26 @@ void *init_pub(void *zctx) return mock_pub; } -void run_pub(void *mock_pub, const string wr_source, lst_events_t &lst) +void run_pub(void *mock_pub, const string wr_source, internal_events_lst_t &lst) { - for(lst_events_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { + for(internal_events_lst_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { EXPECT_EQ(0, zmq_message_send(mock_pub, wr_source, *itc)); } + printf("Published %d events\n", (int)lst.size()); } -static internal_event_t -create_ev(const string rid, sequence_t n, const string d) -{ - stringstream ss; - - ss << d << ":" << n; - - return internal_event_t({ {EVENT_STR_DATA, ss.str()}, - { EVENT_RUNTIME_ID, rid }, { EVENT_SEQUENCE, seq_to_str(n) }}); -} - - - TEST(eventd, proxy) { - printf("TEST started\n"); + printf("PROxy TEST started\n"); + { + /* Direct log messages to stdout */ + string dummy, op("STDOUT"); + swss::Logger::swssOutputNotify(dummy, op); + } bool term_sub = false; string rd_source, wr_source("hello"); - lst_events_t rd_evts, wr_evts; + internal_events_lst_t rd_evts, wr_evts; void *zctx = zmq_ctx_new(); EXPECT_TRUE(NULL != zctx); @@ -91,8 +228,10 @@ TEST(eventd, proxy) /* Provide time for async connect to complete */ this_thread::sleep_for(chrono::milliseconds(100)); + EXPECT_TRUE(5 < ARRAY_SIZE(ldata)); + for(int i=0; i<5; ++i) { - wr_evts.push_back(create_ev("hello", i, "test body")); + wr_evts.push_back(create_ev(ldata[i])); } EXPECT_TRUE(rd_evts.empty()); @@ -101,17 +240,126 @@ TEST(eventd, proxy) /* Publish events. 
*/ run_pub(mock_pub, wr_source, wr_evts); - while(rd_evts.size() != wr_evts.size()) { + for(int i=0; (rd_evts.size() != wr_evts.size()) && (i < 100); ++i) { + /* Loop & wait for atmost a second */ printf("rd_evts.size != wr_evts.size %d != %d\n", (int)rd_evts.size(), (int)wr_evts.size()); this_thread::sleep_for(chrono::milliseconds(10)); } + delete pxy; + pxy = NULL; + term_sub = true; - printf("Waiting for sub thread to join...\n"); thr.join(); zmq_close(mock_pub); zmq_ctx_term(zctx); + printf("eventd_proxy is tested GOOD\n"); +} + + +TEST(eventd, capture) +{ + printf("Capture TEST started\n"); + { + /* Direct log messages to stdout */ + string dummy, op("STDOUT"); + swss::Logger::swssOutputNotify(dummy, op); + swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); + } + + string wr_source("hello"); + internal_events_lst_t wr_evts; + int init_cache = 3; + int cache_max = init_cache + 3; + event_serialized_lst_t evts_start, evts_expect, evts_read; + last_events_t last_evts_exp, last_evts_read; + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + /* Run the proxy; Capture service reads from proxy */ + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + /* Create capture service */ + capture_service *pcap = new capture_service(zctx, cache_max); + + /* Expect START_CAPTURE */ + EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE)); + + EXPECT_TRUE(init_cache > 1); + EXPECT_TRUE((cache_max+3) < ARRAY_SIZE(ldata)); + + /* Collect few serailized strings of events for startup cache */ + for(int i=0; i < init_cache; ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + serialize(ev, evt_str); + evts_start.push_back(evt_str); + evts_expect.push_back(evt_str); + } + + /* + * Collect events to publish for capture to cache + * re-publishing some events sent in cache. + */ + for(int i=1; i < ARRAY_SIZE(ldata); ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + + serialize(ev, evt_str); + + wr_evts.push_back(ev); + + if (i < cache_max) { + evts_expect.push_back(evt_str); + } else { + /* collect last entries for overflow */ + last_evts_exp[ldata[i].rid] = evt_str; + } + } + + EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); + EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start)); + + /* Init pub connection */ + void *mock_pub = init_pub(zctx); + + /* Provide time for async connect to complete */ + this_thread::sleep_for(chrono::milliseconds(4000)); + + /* Publish events from 1 to all. */ + run_pub(mock_pub, wr_source, wr_evts); + + /* Provide time for async message receive. 
*/ + this_thread::sleep_for(chrono::milliseconds(100)); + + /* Stop capture, closes socket & terminates the thread */ + EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); + + printf("-------------- OK\n"); +#if 0 + /* Read the cache */ + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); + + + EXPECT_EQ(evts_read, evts_expect); + EXPECT_EQ(last_evts_read, last_evts_exp); + + delete pxy; + pxy = NULL; + + delete pcap; + pcap = NULL; + + zmq_close(mock_pub); + zmq_ctx_term(zctx); +#endif + printf("eventd_proxy is tested GOOD\n"); } From b854f07d6c4d92cd960fce961f85419ac780cd33 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 9 Jun 2022 05:47:50 +0000 Subject: [PATCH 17/66] cap added in pub --- src/sonic-eventd/src/eventd.cpp | 28 ++++++++--- src/sonic-eventd/tests/eventd_ut.cpp | 73 +++++++++++++++++++++++----- 2 files changed, 83 insertions(+), 18 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 6099b9d8ef09..bd9d34bea6b3 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -123,6 +123,7 @@ capture_service::init_capture_cache(const event_serialized_lst_t &lst) if (deserialize(*itc, event) == 0) { if (validate_event(event, rid, seq)) { m_pre_exist_id[rid] = seq; + printf("init_capture_cache: rid=%s seq=%d\n", rid.c_str(), seq); m_events.push_back(*itc); } } @@ -130,6 +131,7 @@ capture_service::init_capture_cache(const event_serialized_lst_t &lst) SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); } } + printf("init_capture_cache: lst=%d m_events=%d\n", (int)lst.size(), (int)m_events.size()); } @@ -142,7 +144,7 @@ capture_service::do_capture() int block_ms=100; internal_event_t event; string source, evt_str; - chrono::steady_clock::time_point start; + int init_cnt; void *sock = NULL; sock = zmq_socket(m_ctx, ZMQ_SUB); @@ -164,33 +166,40 @@ capture_service::do_capture() this_thread::sleep_for(chrono::milliseconds(10)); } - /* Check read events against provided cache for 2 seconds to skip */ - start = chrono::steady_clock::now(); - while((m_ctrl == START_CAPTURE) && !m_pre_exist_id.empty()) { + /* Check read events against provided cache until as many events are read.*/ + init_cnt = (int)m_events.size(); + while((m_ctrl == START_CAPTURE) && !m_pre_exist_id.empty() && (init_cnt > 0)) { if (zmq_message_read(sock, 0, source, event) == -1) { RET_ON_ERR(zerrno == EAGAIN, "0:Failed to read from capture socket"); } else if (validate_event(event, rid, seq)) { - + init_cnt--; serialize(event, evt_str); pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); + printf("do_capture pre: rid=%s seq=%d\n", rid.c_str(), seq); if (it != m_pre_exist_id.end()) { if (seq > it->second) { m_events.push_back(evt_str); } + else { + printf("skipped. 
Found seq=%d\n", it->second); + } if (seq >= it->second) { m_pre_exist_id.erase(it); } } } - if(chrono::steady_clock::now() - start > chrono::seconds(2)) - break; + else { + printf("do_capture pre: event failed to validate\n"); + } } pre_exist_id_t().swap(m_pre_exist_id); + printf("after pre-exist m_events=%d max=%d m_ctrl=%d\n", (int)m_events.size(), m_cache_max, m_ctrl); + /* Save until max allowed */ while((m_ctrl == START_CAPTURE) && (VEC_SIZE(m_events) < m_cache_max)) { @@ -199,6 +208,7 @@ capture_service::do_capture() "1: Failed to read from capture socket"); } else if (validate_event(event, rid, seq)) { + printf("do_capture main: rid=%s seq=%d\n", rid.c_str(), seq); serialize(event, evt_str); try { @@ -213,7 +223,11 @@ capture_service::do_capture() break; } } + else { + printf("do_capture main: event failed to validate\n"); + } } + printf("after main m_events=%d max=%d m_ctrl=%d\n", (int)m_events.size(), m_cache_max, m_ctrl); /* Save only last event per sender */ diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 258f0a74d5f2..89cb0892330a 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -157,7 +157,42 @@ static const test_data_t ldata[] = { }; -void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t &lst) +void run_cap(void *zctx, bool &term, string &read_source, + int &cnt) +{ + void *mock_cap = zmq_socket (zctx, ZMQ_SUB); + string source; + internal_event_t ev_int; + int block_ms = 200; + int i=0; + + EXPECT_TRUE(NULL != mock_cap); + EXPECT_EQ(0, zmq_connect(mock_cap, get_config(CAPTURE_END_KEY).c_str())); + EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_SUBSCRIBE, "", 0)); + EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); + + while(!term) { + zmq_msg_t source, data; + zmq_msg_init(&source); + zmq_msg_init(&data); + int rc = zmq_msg_recv(&source, mock_cap, 0); + if (rc != -1) { + rc = zmq_msg_recv(&data, mock_cap, 0); + } + if (rc != -1) { + cnt = ++i; + } + else { + printf("CAP failed rc=%d zerrno=%d\n", rc, zerrno); + } + } + + zmq_close(mock_cap); + printf("run_cap exited =%d\n", cnt); +} + +void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t &lst, + int &cnt) { void *mock_sub = zmq_socket (zctx, ZMQ_SUB); string source; @@ -173,11 +208,12 @@ void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { lst.push_back(ev_int); read_source.swap(source); + cnt = (int)lst.size(); } } zmq_close(mock_sub); - printf("run_sub exited\n"); + printf("run_sub exited =%d\n", (int)lst.size()); } void *init_pub(void *zctx) @@ -207,8 +243,11 @@ TEST(eventd, proxy) swss::Logger::swssOutputNotify(dummy, op); } bool term_sub = false; - string rd_source, wr_source("hello"); + bool term_cap = false; + string rd_csource, rd_source, wr_source("hello"); internal_events_lst_t rd_evts, wr_evts; + int rd_evts_sz = 0, rd_cevts_sz = 0; + int wr_sz; void *zctx = zmq_ctx_new(); EXPECT_TRUE(NULL != zctx); @@ -220,7 +259,10 @@ TEST(eventd, proxy) EXPECT_EQ(0, pxy->init()); /* subscriber in a thread */ - thread thr(&run_sub, zctx, ref(term_sub), ref(rd_source), ref(rd_evts)); + thread thr(&run_sub, zctx, ref(term_sub), ref(rd_source), ref(rd_evts), ref(rd_evts_sz)); + + /* capture in a thread */ + thread thrc(&run_cap, zctx, ref(term_cap), ref(rd_csource), ref(rd_cevts_sz)); /* Init pub connection */ void *mock_pub = init_pub(zctx); @@ -240,25 +282,35 @@ 
TEST(eventd, proxy) /* Publish events. */ run_pub(mock_pub, wr_source, wr_evts); - for(int i=0; (rd_evts.size() != wr_evts.size()) && (i < 100); ++i) { + wr_sz = (int)wr_evts.size(); + for(int i=0; (wr_sz != rd_evts_sz) && (i < 100); ++i) { /* Loop & wait for atmost a second */ printf("rd_evts.size != wr_evts.size %d != %d\n", (int)rd_evts.size(), (int)wr_evts.size()); this_thread::sleep_for(chrono::milliseconds(10)); } + this_thread::sleep_for(chrono::milliseconds(1000)); delete pxy; pxy = NULL; term_sub = true; + term_cap = true; thr.join(); + thrc.join(); + printf("wr=%d rd=%d rdc=%d\n", (int)wr_evts.size(), (int)rd_evts.size(), + rd_cevts_sz); + EXPECT_EQ(rd_evts.size(), wr_evts.size()); + EXPECT_EQ(rd_cevts_sz, wr_evts.size()); + zmq_close(mock_pub); zmq_ctx_term(zctx); printf("eventd_proxy is tested GOOD\n"); } +#if 0 TEST(eventd, capture) { printf("Capture TEST started\n"); @@ -317,7 +369,8 @@ TEST(eventd, capture) wr_evts.push_back(ev); if (i < cache_max) { - evts_expect.push_back(evt_str); + if (i >= init_cache) + evts_expect.push_back(evt_str); } else { /* collect last entries for overflow */ last_evts_exp[ldata[i].rid] = evt_str; @@ -337,17 +390,15 @@ TEST(eventd, capture) run_pub(mock_pub, wr_source, wr_evts); /* Provide time for async message receive. */ - this_thread::sleep_for(chrono::milliseconds(100)); + this_thread::sleep_for(chrono::milliseconds(4000)); /* Stop capture, closes socket & terminates the thread */ EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); - printf("-------------- OK\n"); -#if 0 /* Read the cache */ EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); - + printf("evts_read=%d evts_expect=%d\n", (int)evts_read.size(), (int)evts_expect.size()); EXPECT_EQ(evts_read, evts_expect); EXPECT_EQ(last_evts_read, last_evts_exp); @@ -359,7 +410,7 @@ TEST(eventd, capture) zmq_close(mock_pub); zmq_ctx_term(zctx); -#endif printf("eventd_proxy is tested GOOD\n"); } +#endif From ef1325a08470f2fca334c4e4cc411bb5ced73857 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 9 Jun 2022 18:14:11 +0000 Subject: [PATCH 18/66] Added one moe test --- src/sonic-eventd/src/eventd.cpp | 38 ++++++++++------------ src/sonic-eventd/src/eventd.h | 3 +- src/sonic-eventd/tests/eventd_ut.cpp | 48 +++++++++++++++++----------- 3 files changed, 49 insertions(+), 40 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index bd9d34bea6b3..b500441cb8ca 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -123,7 +123,6 @@ capture_service::init_capture_cache(const event_serialized_lst_t &lst) if (deserialize(*itc, event) == 0) { if (validate_event(event, rid, seq)) { m_pre_exist_id[rid] = seq; - printf("init_capture_cache: rid=%s seq=%d\n", rid.c_str(), seq); m_events.push_back(*itc); } } @@ -131,7 +130,6 @@ capture_service::init_capture_cache(const event_serialized_lst_t &lst) SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); } } - printf("init_capture_cache: lst=%d m_events=%d\n", (int)lst.size(), (int)m_events.size()); } @@ -141,7 +139,7 @@ capture_service::do_capture() int rc; runtime_id_t rid; sequence_t seq; - int block_ms=100; + int block_ms=300; internal_event_t event; string source, evt_str; int init_cnt; @@ -175,31 +173,26 @@ capture_service::do_capture() "0:Failed to read from capture socket"); } else if (validate_event(event, rid, seq)) { + bool add = true; init_cnt--; serialize(event, evt_str); pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); - printf("do_capture pre: 
rid=%s seq=%d\n", rid.c_str(), seq); if (it != m_pre_exist_id.end()) { - if (seq > it->second) { - m_events.push_back(evt_str); - } - else { - printf("skipped. Found seq=%d\n", it->second); + if (seq <= it->second) { + add = false; } if (seq >= it->second) { m_pre_exist_id.erase(it); } } - } - else { - printf("do_capture pre: event failed to validate\n"); + if (add) { + m_events.push_back(evt_str); + } } } pre_exist_id_t().swap(m_pre_exist_id); - printf("after pre-exist m_events=%d max=%d m_ctrl=%d\n", (int)m_events.size(), m_cache_max, m_ctrl); - /* Save until max allowed */ while((m_ctrl == START_CAPTURE) && (VEC_SIZE(m_events) < m_cache_max)) { @@ -208,7 +201,6 @@ capture_service::do_capture() "1: Failed to read from capture socket"); } else if (validate_event(event, rid, seq)) { - printf("do_capture main: rid=%s seq=%d\n", rid.c_str(), seq); serialize(event, evt_str); try { @@ -223,15 +215,14 @@ capture_service::do_capture() break; } } - else { - printf("do_capture main: event failed to validate\n"); - } } - printf("after main m_events=%d max=%d m_ctrl=%d\n", (int)m_events.size(), m_cache_max, m_ctrl); + /* Clear the map, created to ensure memory space available */ + m_last_events.clear(); + m_last_events_init = true; /* Save only last event per sender */ - while((m_ctrl == START_CAPTURE)) { + while(m_ctrl == START_CAPTURE) { if (zmq_message_read(sock, 0, source, event) == -1) { RET_ON_ERR(zerrno == EAGAIN, @@ -242,6 +233,7 @@ capture_service::do_capture() m_last_events[rid] = evt_str; } } + out: /* * Capture stop will close the socket which fail the read @@ -317,7 +309,11 @@ capture_service::read_cache(event_serialized_lst_t &lst_fifo, last_events_t &lst_last) { lst_fifo.swap(m_events); - lst_last.swap(m_last_events); + if (m_last_events_init) { + lst_last.swap(m_last_events); + } else { + last_events_t().swap(lst_last); + } last_events_t().swap(m_last_events); event_serialized_lst_t().swap(m_events); return 0; diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h index 263e504af29f..b5a7f9a2ab5e 100644 --- a/src/sonic-eventd/src/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -94,7 +94,7 @@ class capture_service { public: capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_cap_run(false), - m_ctrl(NEED_INIT), m_cache_max(cache_max) + m_ctrl(NEED_INIT), m_cache_max(cache_max), m_last_events_init(false) {} ~capture_service(); @@ -120,6 +120,7 @@ class capture_service event_serialized_lst_t m_events; last_events_t m_last_events; + bool m_last_events_init; typedef map pre_exist_id_t; pre_exist_id_t m_pre_exist_id; diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 89cb0892330a..d1668178ca0e 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -182,13 +182,9 @@ void run_cap(void *zctx, bool &term, string &read_source, if (rc != -1) { cnt = ++i; } - else { - printf("CAP failed rc=%d zerrno=%d\n", rc, zerrno); - } } zmq_close(mock_cap); - printf("run_cap exited =%d\n", cnt); } void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t &lst, @@ -213,7 +209,6 @@ void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t } zmq_close(mock_sub); - printf("run_sub exited =%d\n", (int)lst.size()); } void *init_pub(void *zctx) @@ -230,7 +225,6 @@ void run_pub(void *mock_pub, const string wr_source, internal_events_lst_t &lst) for(internal_events_lst_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { EXPECT_EQ(0, 
zmq_message_send(mock_pub, wr_source, *itc)); } - printf("Published %d events\n", (int)lst.size()); } @@ -285,8 +279,6 @@ TEST(eventd, proxy) wr_sz = (int)wr_evts.size(); for(int i=0; (wr_sz != rd_evts_sz) && (i < 100); ++i) { /* Loop & wait for atmost a second */ - printf("rd_evts.size != wr_evts.size %d != %d\n", - (int)rd_evts.size(), (int)wr_evts.size()); this_thread::sleep_for(chrono::milliseconds(10)); } this_thread::sleep_for(chrono::milliseconds(1000)); @@ -299,8 +291,6 @@ TEST(eventd, proxy) thr.join(); thrc.join(); - printf("wr=%d rd=%d rdc=%d\n", (int)wr_evts.size(), (int)rd_evts.size(), - rd_cevts_sz); EXPECT_EQ(rd_evts.size(), wr_evts.size()); EXPECT_EQ(rd_cevts_sz, wr_evts.size()); @@ -310,7 +300,6 @@ TEST(eventd, proxy) } -#if 0 TEST(eventd, capture) { printf("Capture TEST started\n"); @@ -321,10 +310,24 @@ TEST(eventd, capture) swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); } + /* + * Need to run subscriber; Else publisher would skip publishing + * in the absence of any subscriber. + */ + bool term_sub = false; + string sub_source; + int sub_evts_sz; + internal_events_lst_t sub_evts; + + /* run_pub details */ string wr_source("hello"); internal_events_lst_t wr_evts; - int init_cache = 3; - int cache_max = init_cache + 3; + + /* capture related */ + int init_cache = 3; /* provided along with start capture */ + int cache_max = init_cache + 3; /* capture service cache max */ + + /* startup strings; expected list & read list from capture */ event_serialized_lst_t evts_start, evts_expect, evts_read; last_events_t last_evts_exp, last_evts_read; @@ -338,6 +341,9 @@ TEST(eventd, capture) /* Starting proxy */ EXPECT_EQ(0, pxy->init()); + /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ + thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); + /* Create capture service */ capture_service *pcap = new capture_service(zctx, cache_max); @@ -369,8 +375,10 @@ TEST(eventd, capture) wr_evts.push_back(ev); if (i < cache_max) { - if (i >= init_cache) + if (i >= init_cache) { + /* for i < init_cache, evts_expect is already populated */ evts_expect.push_back(evt_str); + } } else { /* collect last entries for overflow */ last_evts_exp[ldata[i].rid] = evt_str; @@ -384,21 +392,23 @@ TEST(eventd, capture) void *mock_pub = init_pub(zctx); /* Provide time for async connect to complete */ - this_thread::sleep_for(chrono::milliseconds(4000)); + this_thread::sleep_for(chrono::milliseconds(200)); /* Publish events from 1 to all. */ run_pub(mock_pub, wr_source, wr_evts); /* Provide time for async message receive. 
*/ - this_thread::sleep_for(chrono::milliseconds(4000)); + this_thread::sleep_for(chrono::milliseconds(100)); /* Stop capture, closes socket & terminates the thread */ EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); + /* terminate subs thread */ + term_sub = true; + /* Read the cache */ EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); - printf("evts_read=%d evts_expect=%d\n", (int)evts_read.size(), (int)evts_expect.size()); EXPECT_EQ(evts_read, evts_expect); EXPECT_EQ(last_evts_read, last_evts_exp); @@ -408,9 +418,11 @@ TEST(eventd, capture) delete pcap; pcap = NULL; + thr_sub.join(); + zmq_close(mock_pub); zmq_ctx_term(zctx); printf("eventd_proxy is tested GOOD\n"); } -#endif + From 338766b84a50c8903277798fad97bc03198ad8e1 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 9 Jun 2022 21:36:40 +0000 Subject: [PATCH 19/66] test is complete --- src/sonic-eventd/src/eventd.cpp | 10 +- src/sonic-eventd/tests/eventd_ut.cpp | 200 ++++++++++++++++++++++++++- 2 files changed, 207 insertions(+), 3 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index b500441cb8ca..6fd433bba9ea 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -323,6 +323,7 @@ capture_service::read_cache(event_serialized_lst_t &lst_fifo, void run_eventd_service() { + int code = 0; int cache_max; event_service service; eventd_proxy *proxy = NULL; @@ -346,8 +347,8 @@ run_eventd_service() RET_ON_ERR(service.init_server(zctx) == 0, "Failed to init service"); - while(true) { - int code, resp = -1; + while(code != EVENT_EXIT) { + int resp = -1; event_serialized_lst_t req_data, resp_data; RET_ON_ERR(service.channel_read(code, req_data) == 0, @@ -432,6 +433,11 @@ run_eventd_service() case EVENT_ECHO: resp = 0; resp_data.swap(req_data); + break; + + case EVENT_EXIT: + resp = 0; + break; default: SWSS_LOG_ERROR("Unexpected request: %d", code); diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index d1668178ca0e..df8b7824e2cb 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -172,6 +172,10 @@ void run_cap(void *zctx, bool &term, string &read_source, EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); while(!term) { + /* + * Don't call zmq_message_read as that is not thread safe + * Subscriber thread is already calling. + */ zmq_msg_t source, data; zmq_msg_init(&source); zmq_msg_init(&data); @@ -422,7 +426,201 @@ TEST(eventd, capture) zmq_close(mock_pub); zmq_ctx_term(zctx); - printf("eventd_proxy is tested GOOD\n"); + printf("Capture TEST completed\n"); +} + +TEST(eventd, captureCacheMax) +{ + printf("Capture TEST with matchinhg cache-max started\n"); + { + /* Direct log messages to stdout */ + string dummy, op("STDOUT"); + swss::Logger::swssOutputNotify(dummy, op); + swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); + } + + /* + * Need to run subscriber; Else publisher would skip publishing + * in the absence of any subscriber. 
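+     * The capture service itself only subscribes on the capture socket, so
+     * the run_sub thread below is what makes the publisher actually forward
+     * events for capture to record.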
+ */ + bool term_sub = false; + string sub_source; + int sub_evts_sz; + internal_events_lst_t sub_evts; + + /* run_pub details */ + string wr_source("hello"); + internal_events_lst_t wr_evts; + + /* capture related */ + int init_cache = 4; /* provided along with start capture */ + int cache_max = ARRAY_SIZE(ldata); /* capture service cache max */ + + /* startup strings; expected list & read list from capture */ + event_serialized_lst_t evts_start, evts_expect, evts_read; + last_events_t last_evts_read; + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + /* Run the proxy; Capture service reads from proxy */ + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ + thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); + + /* Create capture service */ + capture_service *pcap = new capture_service(zctx, cache_max); + + /* Expect START_CAPTURE */ + EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE)); + + EXPECT_TRUE(init_cache > 1); + + /* Collect few serailized strings of events for startup cache */ + for(int i=0; i < init_cache; ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + serialize(ev, evt_str); + evts_start.push_back(evt_str); + evts_expect.push_back(evt_str); + } + + /* + * Collect events to publish for capture to cache + * re-publishing some events sent in cache. + */ + for(int i=1; i < ARRAY_SIZE(ldata); ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + + serialize(ev, evt_str); + + wr_evts.push_back(ev); + + if (i >= init_cache) { + /* for i < init_cache, evts_expect is already populated */ + evts_expect.push_back(evt_str); + } + } + + EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); + EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start)); + + /* Init pub connection */ + void *mock_pub = init_pub(zctx); + + /* Provide time for async connect to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Publish events from 1 to all. */ + run_pub(mock_pub, wr_source, wr_evts); + + /* Provide time for async message receive. */ + this_thread::sleep_for(chrono::milliseconds(100)); + + /* Stop capture, closes socket & terminates the thread */ + EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); + + /* terminate subs thread */ + term_sub = true; + + /* Read the cache */ + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); + + EXPECT_EQ(evts_read, evts_expect); + EXPECT_TRUE(last_evts_read.empty()); + + delete pxy; + pxy = NULL; + + delete pcap; + pcap = NULL; + + thr_sub.join(); + + zmq_close(mock_pub); + zmq_ctx_term(zctx); + printf("Capture TEST with matchinhg cache-max completed\n"); } +TEST(eventd, service) +{ + /* + * Don't PUB/SUB events as main run_eventd_service itself + * is using zmq_message_read. Any PUB/SUB will cause + * eventd's do_capture running in another thread to call + * zmq_message_read, which will crash as boost:archive is + * not thread safe. + * TEST(eventd, capture) has already tested caching. 
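+     * Hence this test only exercises the request/response channel:
+     * cache init/start/stop/read and EVENT_EXIT.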
+ */ + printf("Service TEST started\n"); + { + /* Direct log messages to stdout */ + string dummy, op("STDOUT"); + swss::Logger::swssOutputNotify(dummy, op); + swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); + } + + /* capture related */ + int init_cache = 4; /* provided along with start capture */ + + /* startup strings; expected list & read list from capture */ + event_serialized_lst_t evts_start, evts_read; + + event_service service; + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + /* + * Start the eventd server side service + * It runs proxy & capture service + * It uses its own zmq context + */ + thread thrSvc(&run_eventd_service); + + /* Need client side service to interact with server side */ + EXPECT_EQ(0, service.init_client(zctx)); + + EXPECT_EQ(-1, service.cache_stop()); + + EXPECT_TRUE(init_cache > 1); + + /* Collect few serailized strings of events for startup cache */ + for(int i=0; i < init_cache; ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + serialize(ev, evt_str); + evts_start.push_back(evt_str); + } + + + EXPECT_EQ(0, service.cache_init()); + EXPECT_EQ(0, service.cache_start(evts_start)); + + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Stop capture, closes socket & terminates the thread */ + EXPECT_EQ(0, service.cache_stop()); + + /* Read the cache */ + EXPECT_EQ(0, service.cache_read(evts_read)); + + EXPECT_EQ(evts_read, evts_start); + + EXPECT_EQ(0, service.send_recv(EVENT_EXIT)); + + service.close_service(); + + thrSvc.join(); + + zmq_ctx_term(zctx); + printf("Service TEST completed\n"); +} + From 0f64cc8569356f14bef7088cefbbb5dae02ec324 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 9 Jun 2022 21:48:24 +0000 Subject: [PATCH 20/66] add UT to make --- src/sonic-eventd/Makefile | 2 ++ src/sonic-eventd/tests/eventd_ut.cpp | 39 ++++++++++++---------------- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index 3fe5a71b3f40..223631fac135 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -34,6 +34,8 @@ eventd-tests: $(TEST_OBJS) @echo 'Invoking: G++ Linker' $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) @echo 'Finished building target: $@' + $(EVENTD_TEST) + @echo 'Finished running tests' @echo ' ' install: diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index df8b7824e2cb..0fdd34b100fe 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -232,14 +232,22 @@ void run_pub(void *mock_pub, const string wr_source, internal_events_lst_t &lst) } +void debug_on() +{ + /* compile with -D DEBUG_TEST or add "#define DEBUG_TEST" to include. 
*/ +#ifdef DEBUG_TEST + /* Direct log messages to stdout */ + string dummy, op("STDOUT"); + swss::Logger::swssOutputNotify(dummy, op); + swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); +#endif +} + TEST(eventd, proxy) { + debug_on(); + printf("PROxy TEST started\n"); - { - /* Direct log messages to stdout */ - string dummy, op("STDOUT"); - swss::Logger::swssOutputNotify(dummy, op); - } bool term_sub = false; bool term_cap = false; string rd_csource, rd_source, wr_source("hello"); @@ -307,12 +315,7 @@ TEST(eventd, proxy) TEST(eventd, capture) { printf("Capture TEST started\n"); - { - /* Direct log messages to stdout */ - string dummy, op("STDOUT"); - swss::Logger::swssOutputNotify(dummy, op); - swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); - } + debug_on(); /* * Need to run subscriber; Else publisher would skip publishing @@ -432,12 +435,7 @@ TEST(eventd, capture) TEST(eventd, captureCacheMax) { printf("Capture TEST with matchinhg cache-max started\n"); - { - /* Direct log messages to stdout */ - string dummy, op("STDOUT"); - swss::Logger::swssOutputNotify(dummy, op); - swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); - } + debug_on(); /* * Need to run subscriber; Else publisher would skip publishing @@ -560,12 +558,7 @@ TEST(eventd, service) * TEST(eventd, capture) has already tested caching. */ printf("Service TEST started\n"); - { - /* Direct log messages to stdout */ - string dummy, op("STDOUT"); - swss::Logger::swssOutputNotify(dummy, op); - swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); - } + debug_on(); /* capture related */ int init_cache = 4; /* provided along with start capture */ From 5fe6502fe601fa3a968c73f977868f53d49c5a50 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Sat, 11 Jun 2022 17:44:37 +0000 Subject: [PATCH 21/66] Adding required script & services --- files/build_templates/eventd.service.j2 | 16 +++++++++++ .../build_templates/sonic_debian_extension.j2 | 1 + src/sonic-eventd/tests/eventd_ut.cpp | 27 +++++++++++++++---- 3 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 files/build_templates/eventd.service.j2 diff --git a/files/build_templates/eventd.service.j2 b/files/build_templates/eventd.service.j2 new file mode 100644 index 000000000000..3b5dfc9c8d05 --- /dev/null +++ b/files/build_templates/eventd.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=EVENTD container +Requires=updategraph.service +Requisite=swss.service +After=updategraph.service swss.service syncd.service +BindsTo=sonic.target +After=sonic.target +Before=ntp-config.service +StartLimitIntervalSec=1200 +StartLimitBurst=3 + +[Service] +ExecStartPre=/usr/bin/{{docker_container_name}}.sh start +ExecStart=/usr/bin/{{docker_container_name}}.sh wait +ExecStop=/usr/bin/{{docker_container_name}}.sh stop +RestartSec=30 diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index aebd6b2fcd0d..49dafc29d4ff 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -806,6 +806,7 @@ sudo LANG=C cp $SCRIPTS_DIR/bgp.sh $FILESYSTEM_ROOT/usr/local/bin/bgp.sh sudo LANG=C cp $SCRIPTS_DIR/teamd.sh $FILESYSTEM_ROOT/usr/local/bin/teamd.sh sudo LANG=C cp $SCRIPTS_DIR/lldp.sh $FILESYSTEM_ROOT/usr/local/bin/lldp.sh sudo LANG=C cp $SCRIPTS_DIR/radv.sh $FILESYSTEM_ROOT/usr/local/bin/radv.sh +sudo LANG=C cp $SCRIPTS_DIR/eventd.sh $FILESYSTEM_ROOT/usr/local/bin/eventd.sh sudo LANG=C cp $SCRIPTS_DIR/asic_status.sh $FILESYSTEM_ROOT/usr/local/bin/asic_status.sh sudo LANG=C cp 
$SCRIPTS_DIR/asic_status.py $FILESYSTEM_ROOT/usr/local/bin/asic_status.py diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 0fdd34b100fe..a70c7a1c9d3b 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -204,11 +204,18 @@ void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_SUBSCRIBE, "", 0)); EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); - while(!term) { - if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { - lst.push_back(ev_int); - read_source.swap(source); - cnt = (int)lst.size(); + if (cnt == 0) { + while(!term) { + if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { + lst.push_back(ev_int); + read_source.swap(source); + cnt = (int)lst.size(); + } + } + } + else { + while(!term) { + this_thread::sleep_for(chrono::milliseconds(100)); } } @@ -349,6 +356,11 @@ TEST(eventd, capture) EXPECT_EQ(0, pxy->init()); /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ + /* + * Block sub from calling zmq_message_read as capture service is calling + * and zmq_message_read crashes on access from more than one thread. + */ + sub_evts_sz = -1; thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ @@ -469,6 +481,11 @@ TEST(eventd, captureCacheMax) EXPECT_EQ(0, pxy->init()); /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ + /* + * Block sub from calling zmq_message_read as capture service is calling + * and zmq_message_read crashes on access from more than one thread. + */ + sub_evts_sz = -1; thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ From 1fd68a904489fce73cb8607b41d5df98ee0c59e1 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 13 Jun 2022 15:30:05 +0000 Subject: [PATCH 22/66] Added service file --- files/build_templates/eventd.service.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/build_templates/eventd.service.j2 b/files/build_templates/eventd.service.j2 index 3b5dfc9c8d05..dc250fe450eb 100644 --- a/files/build_templates/eventd.service.j2 +++ b/files/build_templates/eventd.service.j2 @@ -2,7 +2,7 @@ Description=EVENTD container Requires=updategraph.service Requisite=swss.service -After=updategraph.service swss.service syncd.service +After=updategraph.service BindsTo=sonic.target After=sonic.target Before=ntp-config.service From 9f9027b2558c8b8ceb0f5dc043282970ac16f6e5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 13 Jun 2022 16:10:30 +0000 Subject: [PATCH 23/66] Add eventd as feature --- files/build_templates/init_cfg.json.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/build_templates/init_cfg.json.j2 b/files/build_templates/init_cfg.json.j2 index b51aaf2f0c14..1eff7b50fa77 100644 --- a/files/build_templates/init_cfg.json.j2 +++ b/files/build_templates/init_cfg.json.j2 @@ -34,6 +34,7 @@ ("pmon", "enabled", false, "enabled"), ("radv", "enabled", false, "enabled"), ("snmp", "enabled", true, "enabled"), + ("eventd", "enabled", true, "enabled"), ("swss", "enabled", false, "enabled"), ("syncd", "enabled", false, "enabled"), ("teamd", "enabled", false, "enabled")] %} @@ -58,7 +59,7 @@ "has_per_asic_scope": {% if feature + '@.service' in installer_services.split(' ') %}true{% else %}false{% endif %}, 
"auto_restart": "{{autorestart}}", {%- if include_kubernetes == "y" %} -{%- if feature in ["lldp", "pmon", "radv", "snmp", "telemetry"] %} +{%- if feature in ["lldp", "pmon", "radv", "eventd", "snmp", "telemetry"] %} "set_owner": "kube", {% else %} "set_owner": "local", {% endif %} {% endif %} "high_mem_alert": "disabled" From d5b956222ecb8f1ad5f91ec11beb84bbc1a8442e Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 00:40:45 +0000 Subject: [PATCH 24/66] service starts fine; tool ready to test send/receive --- dockers/docker-eventd/Dockerfile.j2 | 1 + dockers/docker-eventd/eventd.sh | 3 - dockers/docker-eventd/supervisord.conf | 2 +- files/build_templates/eventd.service.j2 | 5 +- src/sonic-eventd/Makefile | 13 +- src/sonic-eventd/tests/subdir.mk | 2 +- src/sonic-eventd/tools/events_tool.cpp | 335 ++++++++++++++++++++++++ src/sonic-eventd/tools/sample_ip.json | 1 + src/sonic-eventd/tools/subdir.mk | 12 + 9 files changed, 365 insertions(+), 9 deletions(-) delete mode 100755 dockers/docker-eventd/eventd.sh create mode 100644 src/sonic-eventd/tools/events_tool.cpp create mode 100644 src/sonic-eventd/tools/sample_ip.json create mode 100644 src/sonic-eventd/tools/subdir.mk diff --git a/dockers/docker-eventd/Dockerfile.j2 b/dockers/docker-eventd/Dockerfile.j2 index 0a8fad29c358..8d935dc9f365 100644 --- a/dockers/docker-eventd/Dockerfile.j2 +++ b/dockers/docker-eventd/Dockerfile.j2 @@ -28,6 +28,7 @@ RUN apt-get clean -y && \ apt-get autoremove -y && \ rm -rf /debs +COPY ["start.sh", "/usr/bin/"] COPY ["supervisord.conf", "/etc/supervisor/conf.d/"] COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] COPY ["critical_processes", "/etc/supervisor"] diff --git a/dockers/docker-eventd/eventd.sh b/dockers/docker-eventd/eventd.sh deleted file mode 100755 index a26e7ffd6ef8..000000000000 --- a/dockers/docker-eventd/eventd.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -exec /usr/bin/eventd diff --git a/dockers/docker-eventd/supervisord.conf b/dockers/docker-eventd/supervisord.conf index be51f922c120..5d9a50bca2ae 100644 --- a/dockers/docker-eventd/supervisord.conf +++ b/dockers/docker-eventd/supervisord.conf @@ -41,7 +41,7 @@ dependent_startup_wait_for=rsyslogd:running [program:eventd] -command=/usr/bin/eventd +command=/usr/sbin/eventd priority=3 autostart=false autorestart=false diff --git a/files/build_templates/eventd.service.j2 b/files/build_templates/eventd.service.j2 index dc250fe450eb..0ad7f52ee83d 100644 --- a/files/build_templates/eventd.service.j2 +++ b/files/build_templates/eventd.service.j2 @@ -1,11 +1,9 @@ [Unit] Description=EVENTD container Requires=updategraph.service -Requisite=swss.service After=updategraph.service BindsTo=sonic.target After=sonic.target -Before=ntp-config.service StartLimitIntervalSec=1200 StartLimitBurst=3 @@ -14,3 +12,6 @@ ExecStartPre=/usr/bin/{{docker_container_name}}.sh start ExecStart=/usr/bin/{{docker_container_name}}.sh wait ExecStop=/usr/bin/{{docker_container_name}}.sh stop RestartSec=30 + +[Install] +WantedBy=sonic.target diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index 223631fac135..fa42b1bdf89c 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -1,6 +1,7 @@ RM := rm -rf EVENTD_TARGET := eventd EVENTD_TEST := tests/tests +EVENTD_TOOL := tools/events_tool CP := cp MKDIR := mkdir CC := g++ @@ -19,8 +20,9 @@ endif -include src/subdir.mk -include tests/subdir.mk +-include tools/subdir.mk -all: sonic-eventd eventd-tests +all: sonic-eventd eventd-tests eventd-tool sonic-eventd: $(OBJS) 
@echo 'Building target: $@' @@ -29,12 +31,19 @@ sonic-eventd: $(OBJS) @echo 'Finished building target: $@' @echo ' ' +eventd-tool: $(TOOL_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TOOL) $(TOOL_OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + eventd-tests: $(TEST_OBJS) @echo 'Building target: $@' @echo 'Invoking: G++ Linker' $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) @echo 'Finished building target: $@' - $(EVENTD_TEST) + @echo 'Blocked $(EVENTD_TEST)' @echo 'Finished running tests' @echo ' ' diff --git a/src/sonic-eventd/tests/subdir.mk b/src/sonic-eventd/tests/subdir.mk index a29131c57000..a251b4bea82a 100644 --- a/src/sonic-eventd/tests/subdir.mk +++ b/src/sonic-eventd/tests/subdir.mk @@ -7,6 +7,6 @@ C_DEPS += ./tests/eventd_ut.d ./tests/main.d tests/%.o: tests/%.cpp @echo 'Building file: $<' @echo 'Invoking: GCC C++ Compiler' - $(CC) -D__FILENAME__="$(subst src/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + $(CC) -D__FILENAME__="$(subst tests/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" @echo 'Finished building: $<' @echo ' ' diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp new file mode 100644 index 000000000000..3f63273709c9 --- /dev/null +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -0,0 +1,335 @@ +#include +#include +#include "events.h" +#include "events_common.h" + +#define ASSERT(res, m, ...) \ + if (!(res)) {\ + int _e = errno; \ + printf("Failed here %s:%d errno:%d zerrno:%d ", __FUNCTION__, __LINE__, _e, zmq_errno()); \ + printf(m, ##__VA_ARGS__); \ + printf("\n"); \ + exit(-1); } + + +typedef enum { + OP_INIT=0, + OP_SEND=1, + OP_RECV=2, + OP_SEND_RECV=3 //SEND|RECV +} op_t; + + +#define PRINT_CHUNK_SZ 2 + +/* + * Usage: + */ + +const char *s_usage = "\ +-s - To Send\n\ +-r - To receive\n\ +Note:\n\ + when both -s & -r are given:\n\ + it uses main thread to publish and fork a dedicated thread to receive.\n\ + The rest of the parameters except -w is used for send\n\ +\n\ +-n - Count of messages to send/receive. When both given, it is used as count to send\n\ + Default: 1 \n\ + A value of 0 implies unlimited\n\ +\n\ +-p - Count of milliseconds to pause between sends or receives. In send-recv mode, it only affects send.\n\ + Default: 0 implying no pause\n\ +\n\ + -i - List of JSON messages to send in a file, with each event/message\n\ + declared in a single line. When n is more than size of list, the list\n\ + is rotated upon completion.\n\ + e.g. '[ \n\ + { \"sonic-bgp:bgp-state\": { \"ip\": \"10.101.01.10\", \"ts\": \"2022-10-11T01:02:30.45567\", \"state\": \"up\" }}\n\ + { \"abc-xxx:yyy-zz\": { \"foo\": \"bar\", \"hello\":\"world\", \"ts\": \"2022-10-11T01:02:30.45567\"}}\n\ + { \"some-mod:some-tag\": {}}\n\ + ]\n\ + Default: \n\ +\n\ +-o - O/p file to write received events\n\ + Default: STDOUT\n"; + + +bool term_receive = false; + +void +do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt=0, int pause=0) +{ + int index=0, total_missed = 0; + ostream* fp = &cout; + ofstream fout; + + if (!outfile.empty()) { + fout.open(outfile); + if (!fout.fail()) { + fp = &fout; + printf("outfile=%s set\n", outfile.c_str()); + } + } + event_handle_t h = events_init_subscriber(false, 100, filter.empty() ? 
NULL : &filter); + ASSERT(h != NULL, "Failed to get subscriber handle"); + + while(!term_receive) { + string key; + event_params_t params; + map_str_str_t evt; + int missed_cnt=-1; + + int rc = event_receive(h, key, params, missed_cnt); + if (rc == -1) { + ASSERT(event_last_error() == EAGAIN, "Failed to receive rc=%d err=%d\n", + rc, event_last_error()); + continue; + } + ASSERT(!key.empty(), "received EMPTY key"); + ASSERT(missed_cnt >= 0, "MIssed count uninitialized"); + + total_missed += missed_cnt; + + evt[key] = map_to_str(params); + (*fp) << map_to_str(evt) << "\n"; + fp->flush(); + + if ((++index % PRINT_CHUNK_SZ) == 0) { + printf("Received index %d\n", index); + } + + if (cnt > 0) { + if (--cnt <= 0) { + break; + } + } + if (pause) { + /* Pause between two sends */ + this_thread::sleep_for(chrono::milliseconds(pause)); + } + } + + events_deinit_subscriber(h); + printf("Total received = %d missed = %dfile:%s\n", index, total_missed, + outfile.empty() ? "STDOUT" : outfile.c_str()); +} + + +#if 0 +test code +void +parse_file(const string infile) +{ + ifstream input(infile); + string line; + + while(getline( input, line )) { + const auto &data = nlohmann::json::parse(line); + printf("is_obj=%d is_arr=%d\n", data.is_object(), data.is_array()); + printf("size=%d\n", (int)data.size()); + string k(data.begin().key()); + printf("key=%s\n", k.c_str()); + const auto &val = data.begin().value(); + printf("val: is_obj=%d is_arr=%d\n", val.is_object(), val.is_array()); + printf("Params:\n"); + for(auto par_it = val.begin(); par_it != val.end(); par_it++) { + string k(par_it.key()); + string v(par_it.value()); + printf(" key=%s\n",k.c_str()); + printf(" val=%s\n", v.c_str()); + } + printf("------------------------------\n"); + } +} +#endif + + +int +do_send(const string infile, int cnt, int pause) +{ + typedef struct { + string tag; + event_params_t params; + } evt_t; + + typedef vector lst_t; + + lst_t lst; + string source; + event_handle_t h; + int index = 0; + + if (!infile.empty()) { + ifstream input(infile); + + /* Read infile into list of events, that are ready for send */ + for( string line; getline( input, line ); ) + { + evt_t evt; + string str_params; + + const auto &data = nlohmann::json::parse(line); + ASSERT(data.is_object(), "Parsed data is not object"); + ASSERT((int)data.size() == 1, "string parse size = %d", (int)data.size()); + + string key(data.begin().key()); + if (source.empty()) { + source = key.substr(0, key.find(":")); + } else { + ASSERT(source == key.substr(0, key.find(":")), "source:%s read=%s", + source.c_str(), key.substr(0, key.find(":")).c_str()); + } + evt.tag = key.substr(key.find(":")+1); + + const auto &val = data.begin().value(); + ASSERT(val.is_object(), "Parsed params is not object"); + ASSERT((int)val.size() >= 1, "Expect non empty params"); + + for(auto par_it = val.begin(); par_it != val.end(); par_it++) { + evt.params[string(par_it.key())] = string(par_it.value()); + } + lst.push_back(evt); + } + } + +#if 0 + cout << "Events to send\n"; + for(lst_t::const_iterator itc=lst.begin(); itc != lst.end(); ++itc) { + cout << "tag:" << itc->tag << " params:" << map_to_str(itc->params) << "\n"; + } + cout << "Events END\n"; +#endif + + if (lst.empty()) { + evt_t evt = { + "test-tag", + { + { "param1", "foo"}, + {"param2", "bar"} + } + }; + lst.push_back(evt); + } + + h = events_init_publisher(source); + ASSERT(h != NULL, "failed to init publisher"); + + /* cnt = 0 as i/p implies forever */ + + while(cnt >= 0) { + /* Keep resending the list until count is exhausted 
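+         * (an input count of 0 means publish forever; once the requested
+         * count has been sent, cnt is set to -1 to leave this loop)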
*/ + for(lst_t::const_iterator itc = lst.begin(); (cnt >= 0) && (itc != lst.end()); ++itc) { + const evt_t &evt = *itc; + + if ((++index % PRINT_CHUNK_SZ) == 0) { + printf("Sending index %d\n", index); + } + + int rc = event_publish(h, evt.tag, evt.params.empty() ? NULL : &evt.params); + ASSERT(rc == 0, "Failed to publish index=%d", index); + + if ((cnt > 0) && (--cnt == 0)) { + /* set to termninate */ + cnt = -1; + } + else if (pause) { + /* Pause between two sends */ + this_thread::sleep_for(chrono::milliseconds(pause)); + } + } + } + + events_deinit_publisher(h); + printf("Sent %d events\n", index); + return 0; +} + +void usage() +{ + printf(s_usage); + exit(-1); +} + +int main(int argc, char **argv) +{ + int op = OP_INIT; + int cnt=0, pause=0; + string json_str_msg, outfile("STDOUT"), infile; + event_subscribe_sources_t filter; + + for(;;) + { + switch(getopt(argc, argv, "srn:p:i:o:f:")) // note the colon (:) to indicate that 'b' has a parameter and is not a switch + { + case 's': + op |= OP_SEND; + continue; + + case 'r': + op |= OP_RECV; + continue; + + case 'n': + cnt = stoi(optarg); + continue; + + case 'p': + pause = stoi(optarg); + continue; + + case 'i': + infile = optarg; + continue; + + case 'o': + outfile = optarg; + continue; + + case 'f': + { + stringstream ss(optarg); //create string stream from the string + while(ss.good()) { + string substr; + getline(ss, substr, ','); + filter.push_back(substr); + } + } + continue; + + case -1: + break; + + case '?': + case 'h': + default : + usage(); + break; + + } + break; + } + + + printf("op=%d n=%d pause=%d i=%s o=%s\n", + op, cnt, pause, infile.c_str(), outfile.c_str()); + + if (op == OP_SEND_RECV) { + thread thr(&do_receive, filter, outfile, 0, 0); + do_send(infile, cnt, pause); + } + else if (op == OP_SEND) { + do_send(infile, cnt, pause); + } + else if (op == OP_RECV) { + do_receive(filter, outfile, cnt, pause); + } + else { + ASSERT(false, "Elect -s for send or -r receive or both; Bailing out with no action\n"); + } + + + printf("--------- END: Good run -----------------\n"); + return 0; +} + diff --git a/src/sonic-eventd/tools/sample_ip.json b/src/sonic-eventd/tools/sample_ip.json new file mode 100644 index 000000000000..4c17073c3d91 --- /dev/null +++ b/src/sonic-eventd/tools/sample_ip.json @@ -0,0 +1 @@ +{"key-0": {"foo": "bar", "hello": "world" }} diff --git a/src/sonic-eventd/tools/subdir.mk b/src/sonic-eventd/tools/subdir.mk new file mode 100644 index 000000000000..5f13043dd612 --- /dev/null +++ b/src/sonic-eventd/tools/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +TOOL_OBJS = ./tools/events_tool.o + +C_DEPS += ./tools/events_tool.d + +tools/%.o: tools/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst tools/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' From abd6eb51fb3fd0cc4157e0f571f6459e5ef718f2 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 00:48:36 +0000 Subject: [PATCH 25/66] Add tool to Debian pkg --- src/sonic-eventd/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index fa42b1bdf89c..aecd5af294d3 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -50,6 +50,7 @@ eventd-tests: $(TEST_OBJS) install: $(MKDIR) -p $(DESTDIR)/usr/sbin $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + $(MV) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin deinstall: $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) From 
999a17561d58ebb465c70c2cf2dedf2cf90e8c9a Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 14 Jun 2022 01:19:04 +0000 Subject: [PATCH 26/66] Add rsyslog plugin and tests --- src/sonic-eventd/rsyslog_plugin/main.cpp | 49 +++++++ .../rsyslog_plugin/rsyslog_plugin.cpp | 77 +++++++++++ .../rsyslog_plugin/rsyslog_plugin.h | 43 ++++++ .../rsyslog_plugin/syslog_parser.cpp | 49 +++++++ .../rsyslog_plugin/syslog_parser.h | 28 ++++ .../rsyslog_plugin_ut.cpp | 125 ++++++++++++++++++ .../rsyslog_plugin_tests/test_regex_1.rc.json | 1 + .../rsyslog_plugin_tests/test_regex_2.rc.json | 7 + .../rsyslog_plugin_tests/test_regex_3.rc.json | 6 + .../rsyslog_plugin_tests/test_regex_4.rc.json | 7 + .../rsyslog_plugin_tests/test_syslogs.txt | 4 + 11 files changed, 396 insertions(+) create mode 100644 src/sonic-eventd/rsyslog_plugin/main.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h create mode 100644 src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/syslog_parser.h create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt diff --git a/src/sonic-eventd/rsyslog_plugin/main.cpp b/src/sonic-eventd/rsyslog_plugin/main.cpp new file mode 100644 index 000000000000..6bf8862b73ff --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/main.cpp @@ -0,0 +1,49 @@ +#include +#include +#include "rsyslog_plugin.h" +#include "syslog_parser.h" + +using namespace std; + +void showUsage() { + cerr << "Usage for rsyslog_plugin: " << " SOURCES\n" + << "Options:\n" + << "\t-r,required,type=string\t\tPath to regex file" + << "\t-m,required,type=string\t\tYANG module name of source generating syslog message" + << endl; +} + +int main(int argc, char** argv) { + string regex_path; + string module_name; + int option_val; + + while((option_val = getopt(argc, argv, "r:m:")) != -1) { + switch(option_val) { + case 'r': + if(optarg != NULL) { + regex_path = optarg; + } + break; + case 'm': + if(optarg != NULL) { + module_name = optarg; + } + break; + default: + showUsage(); + return 1; + } + } + + if(regex_path.empty() || module_name.empty()) { // Missing required rc path + showUsage(); + return 1; + } + + SyslogParser* parser = new SyslogParser({}, json::array()); + RsyslogPlugin* plugin = new RsyslogPlugin(parser, module_name, regex_path); + + plugin->run(); + return 0; +} diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp new file mode 100644 index 000000000000..b6ff51355186 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -0,0 +1,77 @@ +#include +#include +#include +#include +#include +#include "rsyslog_plugin.h" +#include "common/logger.h" +#include "common/json.hpp" +#include "common/events.h" + +using namespace std; +using namespace swss; +using json = nlohmann::json; + +void RsyslogPlugin::onMessage(string msg) { + string tag = ""; + event_params_t param_dict; + if(!parser->parseMessage(msg, tag, param_dict)) { + SWSS_LOG_INFO("%s was not able to be parsed into a structured event\n", msg.c_str()); + } else { + 
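+        // The message matched one of the loaded regexes; publish the
+        // structured event through the handle created in onInit().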
int return_code = event_publish(fetchHandle(), tag, ¶m_dict); + if (return_code != 0) { + SWSS_LOG_INFO("rsyslog_plugin was not able to publish event for %s\n", tag.c_str()); + } + } +} + +[[noreturn]] void RsyslogPlugin::run() { + while(true) { + string line; + getline(cin, line); + if(line.empty()) { + continue; + } + onMessage(line); + } +} + +bool RsyslogPlugin::createRegexList() { + fstream regex_file; + regex_file.open(regex_path, ios::in); + if (!regex_file) { + SWSS_LOG_ERROR("No such path exists: %s for source %s\n", regex_path.c_str(), module_name.c_str()); + return false; + } + try { + regex_file >> parser->regex_list; + } catch (exception& exception) { + SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", regex_path.c_str(), exception.what()); + return false; + } + + string regex_string = ""; + regex expression; + + for(long unsigned int i = 0; i < parser->regex_list.size(); i++) { + try { + regex_string = parser->regex_list[i]["regex"]; + regex expr(regex_string); + expression = expr; + } catch (exception& exception) { + SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", exception.what()); + return false; + } + parser->expressions.push_back(expression); + } + regex_file.close(); + return true; +} + + +RsyslogPlugin::RsyslogPlugin(SyslogParser* syslog_parser, string mod_name, string path) { + parser = syslog_parser; + module_name = mod_name; + regex_path = path; + onInit(); +} diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h new file mode 100644 index 000000000000..631652cbb5af --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -0,0 +1,43 @@ +#ifndef RSYSLOG_PLUGIN_H +#define RSYSLOG_PLUGIN_H + +#include +#include +#include "syslog_parser.h" +#include "common/logger.h" +#include "common/events.h" + +using namespace std; +using json = nlohmann::json; + +/** + * Rsyslog Plugin will utilize an instance of a syslog parser to read syslog messages from rsyslog.d and will continuously read from stdin + * A plugin instance is created for each container/host. 
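+ *
+ * The regex file handed to the plugin is a JSON array of entries, each with a
+ * "tag", a "regex" and a "params" list, e.g. (illustrative, modeled on the
+ * bgp-state fixture in rsyslog_plugin_tests/test_regex_2.rc.json):
+ *   { "tag": "bgp-state", "regex": "<pattern with capture groups>", "params": [ "timestamp", "neighbor_ip", "state" ] }
+ * Capture groups of the first matching regex are published as the event's
+ * parameters under that tag.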
+ * + */ + +class RsyslogPlugin { +public: + RsyslogPlugin(SyslogParser* syslog_parser, string mod_name, string path); + void onMessage(string msg); + void run(); + bool createRegexList(); + event_handle_t fetchHandle() { + return event_handle; + } + SyslogParser* parser; +private: + string regex_path; + string module_name; + event_handle_t event_handle; + + bool onInit() { + event_handle = events_init_publisher(module_name); + int return_code = createRegexList(); + return (event_handle != NULL || return_code == 0); + } + +}; + +#endif + diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp new file mode 100644 index 000000000000..fa56aeff2245 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -0,0 +1,49 @@ +#include +#include +#include +#include +#include "syslog_parser.h" +#include "common/logger.h" + + +using namespace std; +/** + * Parses syslog message and returns structured event + * + * @param nessage us syslog message being fed in by rsyslog.d + * @return return structured event json for publishing + * +*/ + +bool SyslogParser::parseMessage(string message, string& event_tag, event_params_t& param_map) { + for(long unsigned int i = 0; i < regex_list.size(); i++) { + smatch match_results; + regex_search(message, match_results, expressions[i]); + vector groups; + vector params; + try { + event_tag = regex_list[i]["tag"]; + vector p = regex_list[i]["params"]; + params = p; + } catch (exception& exception) { + SWSS_LOG_ERROR("Invalid regex list, throws exception: %s\n", exception.what()); + return false; + } + // first match in groups is entire message + for(long unsigned int j = 1; j < match_results.size(); j++) { + groups.push_back(match_results.str(j)); + } + if (groups.size() == params.size()) { // found matching regex + transform(params.begin(), params.end(), groups.begin(), inserter(param_map, param_map.end()), [](string a, string b) { + return make_pair(a,b); + }); + return true; + } + } + return false; +} + +SyslogParser::SyslogParser(vector regex_expressions, json list) { + expressions = regex_expressions; + regex_list = list; +} diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h new file mode 100644 index 000000000000..5e970f84fa9a --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -0,0 +1,28 @@ +#ifndef SYSLOG_PARSER_H +#define SYSLOG_PARSER_H + +#include +#include +#include +#include "common/json.hpp" +#include "common/events.h" + +using namespace std; +using json = nlohmann::json; + +/** + * Syslog Parser is responsible for parsing log messages fed by rsyslog.d and returns + * matched result to rsyslog_plugin to use with events publish API + * + */ + +class SyslogParser { +public: + SyslogParser(vector regex_expressions, json list); + bool parseMessage(string message, string& tag, event_params_t& param_dict); + + vector expressions; + json regex_list = json::array(); +}; + +#endif diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp new file mode 100644 index 000000000000..12f9a247acf5 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -0,0 +1,125 @@ +#include +#include +#include +#include "gtest/gtest.h" +#include "common/json.hpp" +#include "common/events.h" +#include "rsyslog_plugin/rsyslog_plugin.h" +#include "rsyslog_plugin/syslog_parser.h" + +using namespace std; +using json = nlohmann::json; + 
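+/*
+ * Tests below cover SyslogParser::parseMessage() with hand-built regex/param
+ * lists, and RsyslogPlugin::createRegexList() against the
+ * test_regex_*.rc.json fixtures plus the sample logs in test_syslogs.txt.
+ */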
+RsyslogPlugin* plugin = NULL; + +json j_list_1 = json::array(); +json j_list_2 = json::array(); +vector test_expressions_1; +vector test_expressions_2; + +void createTests() { + string regex_string_1 = "timestamp (.*) message (.*) other_data (.*)"; + string regex_string_2 = "no match"; + + json j_test_1; + j_test_1["tag"] = "test_tag_1"; + j_test_1["regex"] = regex_string_1; + j_test_1["params"] = { "timestamp", "message", "other_data" }; + j_list_1.push_back(j_test_1); + + json j_test_2; + j_test_2["tag"] = "test_tag_2"; + j_test_2["regex"] = regex_string_2; + j_test_2["params"] = {}; + j_list_2.push_back(j_test_2); + + regex expression_1(regex_string_1); + test_expressions_1.push_back(expression_1); + regex expression_2(regex_string_2); + test_expressions_2.push_back(expression_2); +} + + +TEST(syslog_parser, matching_regex) { + createTests(); + string tag = ""; + event_params_t param_dict; + + event_params_t expected_dict; + expected_dict["timestamp"] = "test_timestamp"; + expected_dict["message"] = "test_message"; + expected_dict["other_data"] = "test_data"; + + SyslogParser* parser = new SyslogParser(test_expressions_1, j_list_1); + + bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, param_dict); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag_1", tag); + EXPECT_EQ(expected_dict, param_dict); + + delete parser; +} + +TEST(syslog_parser, no_matching_regex) { + string tag = ""; + event_params_t param_dict; + SyslogParser* parser = new SyslogParser(test_expressions_2, j_list_2); + bool success = parser->parseMessage("Test Message", tag, param_dict); + EXPECT_EQ(false, success); + delete parser; +} + + +void createPlugin(string path) { + SyslogParser* testParser = new SyslogParser({}, json::array()); + plugin = new RsyslogPlugin(testParser, "test_mod_name", path); +} + +TEST(rsyslog_plugin, createRegexList_invalidJS0N) { + createPlugin("./test_regex_1.rc.json"); + if(plugin != NULL) { + EXPECT_EQ(false, plugin->createRegexList()); + } + delete plugin; +} + +TEST(rsyslog_plugin, createRegexList_missingRegex) { + createPlugin("./test_regex_3.rc.json"); + if(plugin != NULL) { + EXPECT_EQ(false, plugin->createRegexList()); + } + delete plugin; +} + +TEST(rsyslog_plugin, createRegexList_invalidRegex) { + createPlugin("./test_regex_4.rc.json"); + if(plugin != NULL) { + EXPECT_EQ(false, plugin->createRegexList()); + } + delete plugin; +} + +TEST(rsyslog_plugin, createRegexList_validRegex) { + createPlugin("./test_regex_2.rc.json"); + if(plugin != NULL) { + auto parser = plugin->parser; + EXPECT_EQ(1, parser->regex_list.size()); + EXPECT_EQ(1, parser->expressions.size()); + + ifstream infile("test_syslogs.txt"); + string log_message; + bool parse_result; + + while(infile >> log_message >> parse_result) { + string tag = ""; + event_params_t param_dict; + EXPECT_EQ(parse_result, parser->parseMessage(log_message, tag, param_dict)); + } + } + delete plugin; +} + +int main(int argc, char* argv[]) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json new file mode 100644 index 000000000000..72e8ffc0db8a --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json @@ -0,0 +1 @@ +* diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json new file mode 100644 index 000000000000..5c9a2e9612ed --- 
/dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "bgp-state", + "regex": "([a-zA-Z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}) .* %ADJCHANGE: neighbor (.*) (Up|Down) .*", + "params": [ "timestamp", "neighbor_ip", "state" ] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json new file mode 100644 index 000000000000..2e67e88f8448 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json @@ -0,0 +1,6 @@ +[ + { + "tag": "TEST-TAG-NO-REGEX", + "param": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json new file mode 100644 index 000000000000..244b601fbac5 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "TEST-TAG-INVALID-REGEX", + "regex": "++", + "params": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt new file mode 100644 index 000000000000..78f89aec3d28 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt @@ -0,0 +1,4 @@ +"Aug 17 02:39:21.286611 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.90 Down Neighbor deleted" true +"Aug 17 02:46:42.615668 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.90 Up" true +"Aug 17 04:46:51.290979 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.78 Down Neighbor deleted" true +"Aug 17 04:46:51.290979 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %NOEVENT: no event" false From 3082cf8407f71aa6cd05b292a648a21835ae6211 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 14 Jun 2022 01:45:04 +0000 Subject: [PATCH 27/66] Add syslog if init fails --- src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index b6ff51355186..29293fd430f3 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -73,5 +73,7 @@ RsyslogPlugin::RsyslogPlugin(SyslogParser* syslog_parser, string mod_name, strin parser = syslog_parser; module_name = mod_name; regex_path = path; - onInit(); + if(!onInit()) { + SWSS_LOG_ERROR("Initializing rsyslog plugin failed.\n"); + } } From ee28577a5e03c971e53d067802a3544b7ff533dd Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 14 Jun 2022 01:47:25 +0000 Subject: [PATCH 28/66] Fix onInit logic --- src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index 631652cbb5af..252083117b2e 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -34,7 +34,7 @@ class RsyslogPlugin { bool onInit() { event_handle = events_init_publisher(module_name); int return_code = createRegexList(); - return (event_handle != NULL || return_code == 0); + return (event_handle != NULL && return_code == 0); } }; From dc691a1953edad2f5032d56c8f88702743823c3a Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 15:55:29 +0000 Subject: [PATCH 29/66] enable unit test run in Makefile --- src/sonic-eventd/Makefile | 2 +- 
src/sonic-eventd/tools/events_tool.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index aecd5af294d3..4ea8f60458e0 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -43,7 +43,7 @@ eventd-tests: $(TEST_OBJS) @echo 'Invoking: G++ Linker' $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) @echo 'Finished building target: $@' - @echo 'Blocked $(EVENTD_TEST)' + $(EVENTD_TEST) @echo 'Finished running tests' @echo ' ' diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 3f63273709c9..1d26746dfa3d 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -247,7 +247,7 @@ do_send(const string infile, int cnt, int pause) void usage() { - printf(s_usage); + printf("%s", s_usage); exit(-1); } From f4a0c82d32df42ba38fc6478fcbd2db675de5364 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 21:27:06 +0000 Subject: [PATCH 30/66] Added graceful restart --- .../yang-events/sonic-events-bgp.yang | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 17fa64c82fbd..59fa8bd6968b 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -47,6 +47,34 @@ module sonic-events-bgp { uses evtcmn:sonic-events-cmn; } + container bgp-restart { + oc-alarm-types:MINOR + + description " + Declares an event for BGP state for a neighbor IP going up/down.; + + leaf is-all { + type boolean; + description "true - if restart is for all neighbors"; + } + + leaf ip { + type inet:ip-address; + description "IP of neighbor, if restart is for a neighbor"; + } + + leaf status { + type enumeration { + enum "up"; + enum "down"; + + } + description "Provides the status as up (true) or down (false)"; + } + + uses evtcmn:sonic-events-cmn; + } + container notification { oc-alarm-types:MAJOR @@ -78,6 +106,7 @@ module sonic-events-bgp { leaf is-sent { type boolean; description "true - if this notification was for sent messages; false if it was for received."; + } uses evtcmn:sonic-events-cmn; } From dbcc4dfa9b2dad60b16fea2e5920cd602e935acd Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 21:51:46 +0000 Subject: [PATCH 31/66] added comments --- src/sonic-eventd/src/eventd.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 6fd433bba9ea..3616cd4f549a 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -164,7 +164,24 @@ capture_service::do_capture() this_thread::sleep_for(chrono::milliseconds(10)); } - /* Check read events against provided cache until as many events are read.*/ + /* + * Check read events against provided cache until as many events are read. + * to avoid sending duplicates. + * After starting cache service, the caller drains his local cache, which + * could potentially return a new event, that both caller & cache service reads. + * + * The cache service connects but defers any reading until caller provides + * the startup cache. But all events that arrived since connect, though not read + * will be held by ZMQ in its local cache. + * + * When cache service starts reading, check against the initial stock for duplicates. 
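+ * Example (illustrative): if the initial stock caches seq 5 for a runtime id,
+ * captured events from that id with seq <= 5 are dropped as duplicates and
+ * seq 6 and above are saved.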
+ * m_pre_exist_id caches the last seq number in initial stock for each runtime id. + * So only allow sequence number greater than cached number. + * + * Theoretically all the events provided via initial stock could be duplicates. + * Hence until as many events as in initial stock or until the cached id map + * is empty, do this check. + */ init_cnt = (int)m_events.size(); while((m_ctrl == START_CAPTURE) && !m_pre_exist_id.empty() && (init_cnt > 0)) { From f66beac67149faf9cff92ac697ac1c541ce72375 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 14 Jun 2022 23:16:21 +0000 Subject: [PATCH 32/66] tool print is updated as not to truncate --- src/sonic-eventd/tests/eventd_ut.cpp | 23 +++++++--------------- src/sonic-eventd/tools/events_tool.cpp | 27 ++++++++++++++++++++------ src/sonic-eventd/tools/sample_ip.json | 2 +- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index a70c7a1c9d3b..6edee6d36fdb 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -204,18 +204,11 @@ void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_SUBSCRIBE, "", 0)); EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); - if (cnt == 0) { - while(!term) { - if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { - lst.push_back(ev_int); - read_source.swap(source); - cnt = (int)lst.size(); - } - } - } - else { - while(!term) { - this_thread::sleep_for(chrono::milliseconds(100)); + while(!term) { + if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { + lst.push_back(ev_int); + read_source.swap(source); + cnt = (int)lst.size(); } } @@ -330,7 +323,7 @@ TEST(eventd, capture) */ bool term_sub = false; string sub_source; - int sub_evts_sz; + int sub_evts_sz = 0; internal_events_lst_t sub_evts; /* run_pub details */ @@ -360,7 +353,6 @@ TEST(eventd, capture) * Block sub from calling zmq_message_read as capture service is calling * and zmq_message_read crashes on access from more than one thread. */ - sub_evts_sz = -1; thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ @@ -455,7 +447,7 @@ TEST(eventd, captureCacheMax) */ bool term_sub = false; string sub_source; - int sub_evts_sz; + int sub_evts_sz = 0; internal_events_lst_t sub_evts; /* run_pub details */ @@ -485,7 +477,6 @@ TEST(eventd, captureCacheMax) * Block sub from calling zmq_message_read as capture service is calling * and zmq_message_read crashes on access from more than one thread. 
*/ - sub_evts_sz = -1; thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 1d26746dfa3d..3044c0a557a1 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -57,6 +57,24 @@ Note:\n\ bool term_receive = false; +template +string +t_map_to_str(const Map &m) +{ + stringstream _ss; + string sep; + + _ss << "{"; + for (const auto elem: m) { + _ss << sep << "{" << elem.first << "," << elem.second << "}"; + if (sep.empty()) { + sep = ", "; + } + } + _ss << "}"; + return _ss.str(); +} + void do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt=0, int pause=0) { @@ -91,8 +109,8 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt total_missed += missed_cnt; - evt[key] = map_to_str(params); - (*fp) << map_to_str(evt) << "\n"; + evt[key] = t_map_to_str(params); + (*fp) << t_map_to_str(evt) << "\n"; fp->flush(); if ((++index % PRINT_CHUNK_SZ) == 0) { @@ -117,7 +135,6 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt #if 0 -test code void parse_file(const string infile) { @@ -144,7 +161,6 @@ parse_file(const string infile) } #endif - int do_send(const string infile, int cnt, int pause) { @@ -196,7 +212,7 @@ do_send(const string infile, int cnt, int pause) #if 0 cout << "Events to send\n"; for(lst_t::const_iterator itc=lst.begin(); itc != lst.end(); ++itc) { - cout << "tag:" << itc->tag << " params:" << map_to_str(itc->params) << "\n"; + cout << "tag:" << itc->tag << " params:" << t_map_to_str(itc->params) << "\n"; } cout << "Events END\n"; #endif @@ -328,7 +344,6 @@ int main(int argc, char **argv) ASSERT(false, "Elect -s for send or -r receive or both; Bailing out with no action\n"); } - printf("--------- END: Good run -----------------\n"); return 0; } diff --git a/src/sonic-eventd/tools/sample_ip.json b/src/sonic-eventd/tools/sample_ip.json index 4c17073c3d91..acb8726cf253 100644 --- a/src/sonic-eventd/tools/sample_ip.json +++ b/src/sonic-eventd/tools/sample_ip.json @@ -1 +1 @@ -{"key-0": {"foo": "bar", "hello": "world" }} +{"src_0:key-0": {"foo": "bar", "hello": "world" }} From 7cb88d74f52bdaed5a322e3748a2609aa235ffec Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 15 Jun 2022 20:07:24 +0000 Subject: [PATCH 33/66] Add makefile and resolve PR comments --- src/sonic-eventd/Makefile | 83 +++++++++++ src/sonic-eventd/rsyslog_plugin/main.cpp | 29 ++-- .../rsyslog_plugin/rsyslog_plugin.cpp | 76 +++++----- .../rsyslog_plugin/rsyslog_plugin.h | 24 ++-- src/sonic-eventd/rsyslog_plugin/subdir.mk | 13 ++ .../rsyslog_plugin/syslog_parser.cpp | 37 ++--- .../rsyslog_plugin/syslog_parser.h | 8 +- .../rsyslog_plugin_ut.cpp | 131 +++++++++--------- .../rsyslog_plugin_tests/subdir.mk | 12 ++ .../rsyslog_plugin_tests/test_regex_5.rc.json | 7 + .../rsyslog_plugin_tests/test_syslogs_2.txt | 3 + 11 files changed, 262 insertions(+), 161 deletions(-) create mode 100644 src/sonic-eventd/Makefile create mode 100644 src/sonic-eventd/rsyslog_plugin/subdir.mk create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/subdir.mk create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile new file mode 100644 index 000000000000..b21689a76fa0 --- /dev/null +++ 
b/src/sonic-eventd/Makefile @@ -0,0 +1,83 @@ +RM := rm -rf +EVENTD_TARGET := eventd +EVENTD_TEST := tests/tests +EVENTD_TOOL := tools/events_tool +RSYSLOG-PLUGIN_TARGET := rsyslog_plugin +RSYSLOG-PLUGIN_TEST: rsyslog_plugin_tests/tests +CP := cp +MKDIR := mkdir +CC := g++ +MV := mv +LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid +TEST_LIBS := -L/usr/src/gtest -lgtest -lgtest_main -lgmock -lgmock_main + +CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common +PWD := $(shell pwd) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(strip $(C_DEPS)),) +-include $(C_DEPS) $(OBJS) +endif +endif + +-include src/subdir.mk +-include tests/subdir.mk +-include tools/subdir.mk +-include rsyslog_plugin/subdir.mk + +all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin rsyslog-plugin-tests + +sonic-eventd: $(OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TARGET) $(OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +eventd-tool: $(TOOL_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TOOL) $(TOOL_OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +rsyslog-plugin: $(RSYSLOG-PLUGIN_OBJS) + @echo 'Buidling Target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(RSYSLOG-PLUGIN_TARGET) $(RSYSLOG-PLUGIN_OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +eventd-tests: $(TEST_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) + @echo 'Finished building target: $@' + $(EVENTD_TEST) + @echo 'Finished running tests' + @echo ' ' + +rsyslog-plugin-tests: $(RSYSLOG-PLUGIN-TEST_OBJS) + @echo 'BUILDING target: $@' + @echo 'Invoking G++ Linker' + $(CC) $(LDFLAGS) -o $(RSYSLOG-PLUGIN_TEST) $(RSYSLOG-PLUGIN-TEST_OBJS) $(LIBS) $(TEST_LIBS) + @echo 'Finished building target: $@' + $(RSYSLOG-PLUGIN_TEST) + @echo 'Finished running tests' + @echo ' ' + +install: + $(MKDIR) -p $(DESTDIR)/usr/sbin + $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + $(MV) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin + $(MV) $(RSYSLOG-PLUGIN_TARGET) $(DESTDIR)/usr/sbin + +deinstall: + $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) + $(RM) $(DESTDIR)/usr/sbin/$(RSYSLOG-PLUGIN_TARGET) + $(RM) -rf $(DESTDIR)/usr/sbin + +clean: + -@echo ' ' + +.PHONY: all clean dependents diff --git a/src/sonic-eventd/rsyslog_plugin/main.cpp b/src/sonic-eventd/rsyslog_plugin/main.cpp index 6bf8862b73ff..033d45f4ad58 100644 --- a/src/sonic-eventd/rsyslog_plugin/main.cpp +++ b/src/sonic-eventd/rsyslog_plugin/main.cpp @@ -6,10 +6,10 @@ using namespace std; void showUsage() { - cerr << "Usage for rsyslog_plugin: " << " SOURCES\n" - << "Options:\n" - << "\t-r,required,type=string\t\tPath to regex file" - << "\t-m,required,type=string\t\tYANG module name of source generating syslog message" + cout << "Usage for rsyslog_plugin: \n" << "options\n" + << "\t-r,required,type=string\t\tPath to regex file\n" + << "\t-m,required,type=string\t\tYANG module name of source generating syslog message\n" + << "\t-h \t\tHelp" << endl; } @@ -18,18 +18,16 @@ int main(int argc, char** argv) { string module_name; int option_val; - while((option_val = getopt(argc, argv, "r:m:")) != -1) { + while((option_val = getopt(argc, argv, "r:m:h")) != -1) { switch(option_val) { case 'r': - if(optarg != NULL) { - regex_path = optarg; - } + regex_path = optarg; break; case 'm': - if(optarg != NULL) { - 
module_name = optarg; - } + module_name = optarg; break; + case 'h': + case '?': default: showUsage(); return 1; @@ -37,12 +35,15 @@ int main(int argc, char** argv) { } if(regex_path.empty() || module_name.empty()) { // Missing required rc path - showUsage(); + cerr << "Error: Missing regex_path and module_name." << endl; return 1; } - SyslogParser* parser = new SyslogParser({}, json::array()); - RsyslogPlugin* plugin = new RsyslogPlugin(parser, module_name, regex_path); + RsyslogPlugin* plugin = new RsyslogPlugin(module_name, regex_path); + if(!plugin->onInit()) { + SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized.\n"); + return 1; + } plugin->run(); return 0; diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index 29293fd430f3..b6e7b43b17bf 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -6,74 +6,82 @@ #include "rsyslog_plugin.h" #include "common/logger.h" #include "common/json.hpp" -#include "common/events.h" using namespace std; using namespace swss; using json = nlohmann::json; -void RsyslogPlugin::onMessage(string msg) { - string tag = ""; +bool RsyslogPlugin::onMessage(string msg) { + string tag; event_params_t param_dict; - if(!parser->parseMessage(msg, tag, param_dict)) { - SWSS_LOG_INFO("%s was not able to be parsed into a structured event\n", msg.c_str()); + if(!m_parser->parseMessage(msg, tag, param_dict)) { + SWSS_LOG_DEBUG("%s was not able to be parsed into a structured event\n", msg.c_str()); + return false; } else { - int return_code = event_publish(fetchHandle(), tag, ¶m_dict); - if (return_code != 0) { - SWSS_LOG_INFO("rsyslog_plugin was not able to publish event for %s\n", tag.c_str()); - } - } -} - -[[noreturn]] void RsyslogPlugin::run() { - while(true) { - string line; - getline(cin, line); - if(line.empty()) { - continue; + int return_code = event_publish(m_event_handle, tag, ¶m_dict); + if (return_code != 0) { + SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s. 
last thrown event error: %d\n", tag.c_str(), event_last_error()); + return false; } - onMessage(line); + return true; } } bool RsyslogPlugin::createRegexList() { fstream regex_file; - regex_file.open(regex_path, ios::in); + regex_file.open(m_regex_path, ios::in); if (!regex_file) { - SWSS_LOG_ERROR("No such path exists: %s for source %s\n", regex_path.c_str(), module_name.c_str()); + SWSS_LOG_ERROR("No such path exists: %s for source %s\n", m_regex_path.c_str(), m_module_name.c_str()); return false; } try { - regex_file >> parser->regex_list; + regex_file >> m_parser->m_regex_list; } catch (exception& exception) { - SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", regex_path.c_str(), exception.what()); + SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", m_regex_path.c_str(), exception.what()); return false; } - string regex_string = ""; + string regex_string; regex expression; - for(long unsigned int i = 0; i < parser->regex_list.size(); i++) { + for(long unsigned int i = 0; i < m_parser->m_regex_list.size(); i++) { try { - regex_string = parser->regex_list[i]["regex"]; + regex_string = m_parser->m_regex_list[i]["regex"]; + string tag = m_parser->m_regex_list[i]["tag"]; + vector params = m_parser->m_regex_list[i]["params"]; regex expr(regex_string); expression = expr; } catch (exception& exception) { SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", exception.what()); return false; } - parser->expressions.push_back(expression); + m_parser->m_expressions.push_back(expression); } regex_file.close(); return true; } - -RsyslogPlugin::RsyslogPlugin(SyslogParser* syslog_parser, string mod_name, string path) { - parser = syslog_parser; - module_name = mod_name; - regex_path = path; - if(!onInit()) { - SWSS_LOG_ERROR("Initializing rsyslog plugin failed.\n"); +[[noreturn]] void RsyslogPlugin::run() { + while(true) { + string line; + getline(cin, line); + if(line.empty()) { + continue; + } + if(!onMessage(line)) { + SWSS_LOG_DEBUG("RsyslogPlugin was not able to parse or publish the log message: %s\n", line.c_str()); + } } } + +bool RsyslogPlugin::onInit() { + m_event_handle = events_init_publisher(m_module_name); + bool return_code = createRegexList(); + return (m_event_handle != NULL && return_code); +} + +RsyslogPlugin::RsyslogPlugin(string module_name, string regex_path) { + m_parser = new SyslogParser({}, json::array()); + m_module_name = module_name; + m_regex_path = regex_path; +} diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index 252083117b2e..de387c3c068b 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -18,25 +18,17 @@ using json = nlohmann::json; class RsyslogPlugin { public: - RsyslogPlugin(SyslogParser* syslog_parser, string mod_name, string path); - void onMessage(string msg); + bool onInit(); + bool onMessage(string msg); void run(); - bool createRegexList(); - event_handle_t fetchHandle() { - return event_handle; - } - SyslogParser* parser; + RsyslogPlugin(string module_name, string regex_path); private: - string regex_path; - string module_name; - event_handle_t event_handle; - - bool onInit() { - event_handle = events_init_publisher(module_name); - int return_code = createRegexList(); - return (event_handle != NULL && return_code == 0); - } + SyslogParser* m_parser; + event_handle_t m_event_handle; + string m_regex_path; + string m_module_name; + bool createRegexList(); }; #endif diff --git 
a/src/sonic-eventd/rsyslog_plugin/subdir.mk b/src/sonic-eventd/rsyslog_plugin/subdir.mk new file mode 100644 index 000000000000..e5af1085da42 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/subdir.mk @@ -0,0 +1,13 @@ +CC := g++ + +RSYSLOG-PLUGIN_TEST_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o +RSYSLOG-PLUGIN_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/main.o + +C_DEPS += ./rsyslog_plugin/rsyslog_plugin.d ./rsyslog_plugin/syslog_parser.d ./rsyslog_plugin/main.d + +rsyslog_plugin/%.o: rsyslog_plugin/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst rsyslog_plugin/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$(@)" "$<" + @echo 'Finished building: $< + '@echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index fa56aeff2245..6d3ab8505adc 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -16,34 +16,23 @@ using namespace std; */ bool SyslogParser::parseMessage(string message, string& event_tag, event_params_t& param_map) { - for(long unsigned int i = 0; i < regex_list.size(); i++) { + for(long unsigned int i = 0; i < m_regex_list.size(); i++) { smatch match_results; - regex_search(message, match_results, expressions[i]); - vector groups; - vector params; - try { - event_tag = regex_list[i]["tag"]; - vector p = regex_list[i]["params"]; - params = p; - } catch (exception& exception) { - SWSS_LOG_ERROR("Invalid regex list, throws exception: %s\n", exception.what()); - return false; - } - // first match in groups is entire message - for(long unsigned int j = 1; j < match_results.size(); j++) { - groups.push_back(match_results.str(j)); - } - if (groups.size() == params.size()) { // found matching regex - transform(params.begin(), params.end(), groups.begin(), inserter(param_map, param_map.end()), [](string a, string b) { - return make_pair(a,b); - }); - return true; + vector params = m_regex_list[i]["params"]; + if(!regex_search(message, match_results, m_expressions[i]) || params.size() != match_results.size() - 1) { + continue; } + // found matching regex + event_tag = m_regex_list[i]["tag"]; + transform(params.begin(), params.end(), match_results.begin() + 1, inserter(param_map, param_map.end()), [](string a, string b) { + return make_pair(a,b); + }); + return true; } return false; } -SyslogParser::SyslogParser(vector regex_expressions, json list) { - expressions = regex_expressions; - regex_list = list; +SyslogParser::SyslogParser(vector expressions, json regex_list) { + m_expressions = expressions; + m_regex_list = regex_list; } diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index 5e970f84fa9a..d6b39326b290 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -18,11 +18,11 @@ using json = nlohmann::json; class SyslogParser { public: - SyslogParser(vector regex_expressions, json list); - bool parseMessage(string message, string& tag, event_params_t& param_dict); + vector m_expressions; + json m_regex_list = json::array(); - vector expressions; - json regex_list = json::array(); + SyslogParser(vector expressions, json regex_list); + bool parseMessage(string message, string& tag, event_params_t& param_dict); }; #endif diff --git 
a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 12f9a247acf5..b40832b34fbc 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -10,39 +10,19 @@ using namespace std; using json = nlohmann::json; -RsyslogPlugin* plugin = NULL; - -json j_list_1 = json::array(); -json j_list_2 = json::array(); -vector test_expressions_1; -vector test_expressions_2; - -void createTests() { - string regex_string_1 = "timestamp (.*) message (.*) other_data (.*)"; - string regex_string_2 = "no match"; - - json j_test_1; - j_test_1["tag"] = "test_tag_1"; - j_test_1["regex"] = regex_string_1; - j_test_1["params"] = { "timestamp", "message", "other_data" }; - j_list_1.push_back(j_test_1); - - json j_test_2; - j_test_2["tag"] = "test_tag_2"; - j_test_2["regex"] = regex_string_2; - j_test_2["params"] = {}; - j_list_2.push_back(j_test_2); - - regex expression_1(regex_string_1); - test_expressions_1.push_back(expression_1); - regex expression_2(regex_string_2); - test_expressions_2.push_back(expression_2); -} - - TEST(syslog_parser, matching_regex) { - createTests(); - string tag = ""; + json j_list = json::array(); + vector test_expressions; + string regex_string = "timestamp (.*) message (.*) other_data (.*)"; + json j_test; + j_test["tag"] = "test_tag"; + j_test["regex"] = regex_string; + j_test["params"] = { "timestamp", "message", "other_data" }; + j_list.push_back(j_test); + regex expression(regex_string); + test_expressions.push_back(expression); + + string tag; event_params_t param_dict; event_params_t expected_dict; @@ -50,72 +30,85 @@ TEST(syslog_parser, matching_regex) { expected_dict["message"] = "test_message"; expected_dict["other_data"] = "test_data"; - SyslogParser* parser = new SyslogParser(test_expressions_1, j_list_1); + SyslogParser* parser = new SyslogParser(test_expressions, j_list); bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, param_dict); EXPECT_EQ(true, success); - EXPECT_EQ("test_tag_1", tag); + EXPECT_EQ("test_tag", tag); EXPECT_EQ(expected_dict, param_dict); delete parser; } TEST(syslog_parser, no_matching_regex) { - string tag = ""; + json j_list = json::array(); + vector test_expressions; + string regex_string = "no match"; + json j_test; + j_test["tag"] = "test_tag"; + j_test["regex"] = regex_string; + j_test["params"] = vector(); + j_list.push_back(j_test); + regex expression(regex_string); + test_expressions.push_back(expression); + + string tag; event_params_t param_dict; - SyslogParser* parser = new SyslogParser(test_expressions_2, j_list_2); + SyslogParser* parser = new SyslogParser(test_expressions, j_list); bool success = parser->parseMessage("Test Message", tag, param_dict); EXPECT_EQ(false, success); delete parser; } -void createPlugin(string path) { - SyslogParser* testParser = new SyslogParser({}, json::array()); - plugin = new RsyslogPlugin(testParser, "test_mod_name", path); +RsyslogPlugin* createPlugin(string path) { + RsyslogPlugin* plugin = new RsyslogPlugin("test_mod_name", path); + return plugin; } -TEST(rsyslog_plugin, createRegexList_invalidJS0N) { - createPlugin("./test_regex_1.rc.json"); - if(plugin != NULL) { - EXPECT_EQ(false, plugin->createRegexList()); - } +TEST(rsyslog_plugin, onInit_invalidJS0N) { + auto plugin = createPlugin("./test_regex_1.rc.json"); + EXPECT_EQ(false, plugin->onInit()); delete plugin; } -TEST(rsyslog_plugin, 
createRegexList_missingRegex) { - createPlugin("./test_regex_3.rc.json"); - if(plugin != NULL) { - EXPECT_EQ(false, plugin->createRegexList()); - } +TEST(rsyslog_plugin, onInit_missingRegex) { + auto plugin = createPlugin("./test_regex_3.rc.json"); + EXPECT_EQ(false, plugin->onInit()); delete plugin; } -TEST(rsyslog_plugin, createRegexList_invalidRegex) { - createPlugin("./test_regex_4.rc.json"); - if(plugin != NULL) { - EXPECT_EQ(false, plugin->createRegexList()); +TEST(rsyslog_plugin, onInit_invalidRegex) { + auto plugin = createPlugin("./test_regex_4.rc.json"); + EXPECT_EQ(false, plugin->onInit()); + delete plugin; +} + +TEST(rsyslog_plugin, onMessage) { + auto plugin = createPlugin("./test_regex_2.rc.json"); + EXPECT_EQ(true, plugin->onInit()); + + ifstream infile("test_syslogs.txt"); + string log_message; + bool parse_result; + while(infile >> log_message >> parse_result) { + EXPECT_EQ(parse_result, plugin->onMessage(log_message)); } + infile.close(); delete plugin; } -TEST(rsyslog_plugin, createRegexList_validRegex) { - createPlugin("./test_regex_2.rc.json"); - if(plugin != NULL) { - auto parser = plugin->parser; - EXPECT_EQ(1, parser->regex_list.size()); - EXPECT_EQ(1, parser->expressions.size()); - - ifstream infile("test_syslogs.txt"); - string log_message; - bool parse_result; - - while(infile >> log_message >> parse_result) { - string tag = ""; - event_params_t param_dict; - EXPECT_EQ(parse_result, parser->parseMessage(log_message, tag, param_dict)); - } +TEST(rsyslog_plugin, onMessage_noParams) { + auto plugin = createPlugin("./test_regex_5.rc.json"); + EXPECT_EQ(true, plugin->onInit()); + + ifstream infile("test_syslogs_2.txt"); + string log_message; + bool parse_result; + while(infile >> log_message >> parse_result) { + EXPECT_EQ(parse_result, plugin->onMessage(log_message)); } + infile.close(); delete plugin; } diff --git a/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk new file mode 100644 index 000000000000..dd2e47364a2b --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +RSYSLOG-PLUGIN_TEST_OBJS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.o + +C_DEPS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.d + +rsyslog_plugin_tests/%.o: rsyslog_plugin_tests/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst rsyslog_plugin_tests/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$(@)" "$<" + @echo 'Finished building: $< + '@echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json new file mode 100644 index 000000000000..ddaf37c931a8 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "test_tag", + "regex": ".*", + "params": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt new file mode 100644 index 000000000000..d56615f61681 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt @@ -0,0 +1,3 @@ +testMessage true +another_test_message true + true From b92bb297a00121e0807c2400855394ca61e6eb80 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 15 Jun 2022 21:02:20 +0000 Subject: [PATCH 34/66] Remove redundant headers, etc --- src/sonic-eventd/rsyslog_plugin/main.cpp | 3 --- src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp | 3 --- 
src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h | 5 ++--- src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp | 5 ----- 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/main.cpp b/src/sonic-eventd/rsyslog_plugin/main.cpp index 033d45f4ad58..10821c984557 100644 --- a/src/sonic-eventd/rsyslog_plugin/main.cpp +++ b/src/sonic-eventd/rsyslog_plugin/main.cpp @@ -1,9 +1,6 @@ #include #include #include "rsyslog_plugin.h" -#include "syslog_parser.h" - -using namespace std; void showUsage() { cout << "Usage for rsyslog_plugin: \n" << "options\n" diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index b6e7b43b17bf..c4e027960aa7 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -4,11 +4,8 @@ #include #include #include "rsyslog_plugin.h" -#include "common/logger.h" #include "common/json.hpp" -using namespace std; -using namespace swss; using json = nlohmann::json; bool RsyslogPlugin::onMessage(string msg) { diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index de387c3c068b..fea6cef63e13 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -2,13 +2,12 @@ #define RSYSLOG_PLUGIN_H #include -#include #include "syslog_parser.h" -#include "common/logger.h" #include "common/events.h" +#include "common/logger.h" using namespace std; -using json = nlohmann::json; +using namespace swss; /** * Rsyslog Plugin will utilize an instance of a syslog parser to read syslog messages from rsyslog.d and will continuously read from stdin diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index 6d3ab8505adc..e5b1dca78e58 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -1,12 +1,7 @@ #include -#include -#include -#include #include "syslog_parser.h" #include "common/logger.h" - -using namespace std; /** * Parses syslog message and returns structured event * From 4468e1197b83f255bf46dda7446582181abd5454 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 15 Jun 2022 21:26:25 +0000 Subject: [PATCH 35/66] Add logic for empty regex expressions --- src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp | 6 ++++++ src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json | 0 2 files changed, 6 insertions(+) create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index b40832b34fbc..2a8f41e7c840 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -72,6 +72,12 @@ TEST(rsyslog_plugin, onInit_invalidJS0N) { delete plugin; } +TEST(rsyslog_plugin, onInit_emptyJSON) { + auto plugin = createPlugin("./test_regex_6.rc.json"); + EXPECT_EQ(false, plugin->onInit()); + delete plugin; +} + TEST(rsyslog_plugin, onInit_missingRegex) { auto plugin = createPlugin("./test_regex_3.rc.json"); EXPECT_EQ(false, plugin->onInit()); diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json new file mode 100644 index 000000000000..e69de29bb2d1 From c57de232a6b215f073d0042a9ded0885add10884 Mon 
Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 15 Jun 2022 23:57:10 +0000 Subject: [PATCH 36/66] removed restart, as that does not address graceful restart --- .../yang-events/sonic-events-bgp.yang | 28 ------------------- 1 file changed, 28 deletions(-) diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang index 59fa8bd6968b..6c275c754e4b 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang @@ -47,34 +47,6 @@ module sonic-events-bgp { uses evtcmn:sonic-events-cmn; } - container bgp-restart { - oc-alarm-types:MINOR - - description " - Declares an event for BGP state for a neighbor IP going up/down.; - - leaf is-all { - type boolean; - description "true - if restart is for all neighbors"; - } - - leaf ip { - type inet:ip-address; - description "IP of neighbor, if restart is for a neighbor"; - } - - leaf status { - type enumeration { - enum "up"; - enum "down"; - - } - description "Provides the status as up (true) or down (false)"; - } - - uses evtcmn:sonic-events-cmn; - } - container notification { oc-alarm-types:MAJOR From 0eec6615673aee323d2e370281358c350b355709 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 16 Jun 2022 00:31:47 +0000 Subject: [PATCH 37/66] Remove redundant syslog messages --- src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index c4e027960aa7..1ad52fb83829 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -54,6 +54,10 @@ bool RsyslogPlugin::createRegexList() { } m_parser->m_expressions.push_back(expression); } + if(m_parser->m_expressions.empty()) { + SWSS_LOG_ERROR("Empty list of regex expressions.\n"); + return false; + } regex_file.close(); return true; } @@ -65,9 +69,7 @@ bool RsyslogPlugin::createRegexList() { if(line.empty()) { continue; } - if(!onMessage(line)) { - SWSS_LOG_DEBUG("RsyslogPlugin was not able to parse or publish the log message: %s\n", line.c_str()); - } + onMessage(line); } } From 15969a7b9b231b17cebe4221abfd60f1aefe82c5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 17 Jun 2022 17:17:50 +0000 Subject: [PATCH 38/66] name change; test update --- src/sonic-eventd/debian/control | 2 +- src/sonic-eventd/tests/eventd_ut.cpp | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/sonic-eventd/debian/control b/src/sonic-eventd/debian/control index 8611b8f1ac81..95ae6fd76452 100644 --- a/src/sonic-eventd/debian/control +++ b/src/sonic-eventd/debian/control @@ -1,7 +1,7 @@ Source: sonic-eventd Section: devel Priority: optional -Maintainer: Kelly Yeh +Maintainer: Renuka Manavalan Build-Depends: debhelper (>= 12.0.0), libevent-dev, libboost-thread-dev, libboost-system-dev, libswsscommon-dev Standards-Version: 3.9.3 Homepage: https://github.com/Azure/sonic-buildimage diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 6edee6d36fdb..f7f882dcaf84 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -172,6 +172,8 @@ void run_cap(void *zctx, bool &term, string &read_source, EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); while(!term) { + int rc; +#if 0 /* * Don't call zmq_message_read as that 
is not thread safe * Subscriber thread is already calling. @@ -179,10 +181,21 @@ void run_cap(void *zctx, bool &term, string &read_source, zmq_msg_t source, data; zmq_msg_init(&source); zmq_msg_init(&data); - int rc = zmq_msg_recv(&source, mock_cap, 0); + rc = zmq_msg_recv(&source, mock_cap, 0); if (rc != -1) { rc = zmq_msg_recv(&data, mock_cap, 0); } +#else + /* + * Intending to make it thread safe. + * Fix, if test fails. Else it is already good. + */ + { + string source; + internal_event_t ev_int; + rc = zmq_message_read(mock_cap, 0, source, ev_int); + } +#endif if (rc != -1) { cnt = ++i; } From fea1de69b04565f81558462181e25716cb12ab37 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 17 Jun 2022 17:33:54 +0000 Subject: [PATCH 39/66] Comments added for i/p file format --- src/sonic-eventd/tools/events_tool.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 3044c0a557a1..140ad9fc5619 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -3,6 +3,16 @@ #include "events.h" #include "events_common.h" +/* + * Sample i/p file contents for send + * + * {"src_0:key-0": {"foo": "bar", "hello": "world" }} + * {"src_0:key-1": {"foo": "barXX", "hello": "world" }} + * + * Repeat the above line to increase entries. + * Each line is parsed independently, so no "," expected at the end. + */ + #define ASSERT(res, m, ...) \ if (!(res)) {\ int _e = errno; \ From 5916a5a7e8e43a987e43bc604a4151b1d5ccdcce Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 20 Jun 2022 20:40:09 +0000 Subject: [PATCH 40/66] Resolve PR comments --- src/sonic-eventd/rsyslog_plugin/main.cpp | 42 ++++---- .../rsyslog_plugin/rsyslog_plugin.cpp | 73 +++++++------- .../rsyslog_plugin/rsyslog_plugin.h | 14 +-- .../rsyslog_plugin/syslog_parser.cpp | 29 +++--- .../rsyslog_plugin/syslog_parser.h | 6 +- .../rsyslog_plugin_ut.cpp | 96 ++++++++++--------- 6 files changed, 138 insertions(+), 122 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/main.cpp b/src/sonic-eventd/rsyslog_plugin/main.cpp index 10821c984557..53162608c5a9 100644 --- a/src/sonic-eventd/rsyslog_plugin/main.cpp +++ b/src/sonic-eventd/rsyslog_plugin/main.cpp @@ -1,7 +1,13 @@ #include +#include #include #include "rsyslog_plugin.h" +#define SUCCESS_CODE 0 +#define INVALID_REGEX_ERROR_CODE 1 +#define EVENT_INIT_PUBLISH_ERROR_CODE 2 +#define MISSING_ARGS_ERROR_CODE 3 + void showUsage() { cout << "Usage for rsyslog_plugin: \n" << "options\n" << "\t-r,required,type=string\t\tPath to regex file\n" @@ -11,17 +17,17 @@ void showUsage() { } int main(int argc, char** argv) { - string regex_path; - string module_name; - int option_val; + string regexPath; + string moduleName; + int optionVal; - while((option_val = getopt(argc, argv, "r:m:h")) != -1) { - switch(option_val) { + while((optionVal = getopt(argc, argv, "r:m:h")) != -1) { + switch(optionVal) { case 'r': - regex_path = optarg; + regexPath = optarg; break; case 'm': - module_name = optarg; + moduleName = optarg; break; case 'h': case '?': @@ -31,17 +37,21 @@ int main(int argc, char** argv) { } } - if(regex_path.empty() || module_name.empty()) { // Missing required rc path - cerr << "Error: Missing regex_path and module_name." << endl; - return 1; + if(regexPath.empty() || moduleName.empty()) { // Missing required rc path + cerr << "Error: Missing regexPath and moduleName." 
<< endl; + return MISSING_ARGS_ERROR_CODE; } - - RsyslogPlugin* plugin = new RsyslogPlugin(module_name, regex_path); - if(!plugin->onInit()) { - SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized.\n"); - return 1; + + unique_ptr plugin(new RsyslogPlugin(moduleName, regexPath)); + int returnCode = plugin->onInit(); + if(returnCode == INVALID_REGEX_ERROR_CODE) { + SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized due to invalid regex file provided.\n"); + return returnCode; + } else if(returnCode == EVENT_INIT_PUBLISH_ERROR_CODE) { + SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized due to event_init_publish call failing.\n"); + return returnCode; } plugin->run(); - return 0; + return SUCCESS_CODE; } diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index 1ad52fb83829..ca50f465a79b 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -1,4 +1,3 @@ -#include #include #include #include @@ -10,55 +9,58 @@ using json = nlohmann::json; bool RsyslogPlugin::onMessage(string msg) { string tag; - event_params_t param_dict; - if(!m_parser->parseMessage(msg, tag, param_dict)) { + event_params_t paramDict; + if(!m_parser->parseMessage(msg, tag, paramDict)) { SWSS_LOG_DEBUG("%s was not able to be parsed into a structured event\n", msg.c_str()); - return false; + return false; } else { - int return_code = event_publish(m_event_handle, tag, ¶m_dict); - if (return_code != 0) { + int returnCode = event_publish(m_eventHandle, tag, ¶mDict); + if (returnCode != 0) { SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s. last thrown event error: %d\n", tag.c_str(), event_last_error()); - return false; + return false; } - return true; + return true; } } bool RsyslogPlugin::createRegexList() { - fstream regex_file; - regex_file.open(m_regex_path, ios::in); - if (!regex_file) { - SWSS_LOG_ERROR("No such path exists: %s for source %s\n", m_regex_path.c_str(), m_module_name.c_str()); + fstream regexFile; + regexFile.open(m_regexPath, ios::in); + if (!regexFile) { + SWSS_LOG_ERROR("No such path exists: %s for source %s\n", m_regexPath.c_str(), m_moduleName.c_str()); return false; } try { - regex_file >> m_parser->m_regex_list; - } catch (exception& exception) { - SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", m_regex_path.c_str(), exception.what()); + regexFile >> m_parser->m_regexList; + } catch (invalid_argument& iaException) { + SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", m_regexPath.c_str(), iaException.what()); return false; } - string regex_string; + string regexString; regex expression; - for(long unsigned int i = 0; i < m_parser->m_regex_list.size(); i++) { + for(long unsigned int i = 0; i < m_parser->m_regexList.size(); i++) { try { - regex_string = m_parser->m_regex_list[i]["regex"]; - string tag = m_parser->m_regex_list[i]["tag"]; - vector params = m_parser->m_regex_list[i]["params"]; - regex expr(regex_string); + regexString = m_parser->m_regexList[i]["regex"]; + string tag = m_parser->m_regexList[i]["tag"]; + vector params = m_parser->m_regexList[i]["params"]; + regex expr(regexString); expression = expr; - } catch (exception& exception) { - SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", exception.what()); + } catch (domain_error& deException) { + SWSS_LOG_ERROR("Missing required key, throws exception: %s\n", deException.what()); return false; - } + } catch (regex_error& 
reException) { + SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", reException.what()); + return false; + } m_parser->m_expressions.push_back(expression); } if(m_parser->m_expressions.empty()) { SWSS_LOG_ERROR("Empty list of regex expressions.\n"); return false; } - regex_file.close(); + regexFile.close(); return true; } @@ -73,14 +75,19 @@ bool RsyslogPlugin::createRegexList() { } } -bool RsyslogPlugin::onInit() { - m_event_handle = events_init_publisher(m_module_name); - bool return_code = createRegexList(); - return (m_event_handle != NULL && return_code); +int RsyslogPlugin::onInit() { + m_eventHandle = events_init_publisher(m_moduleName); + bool success = createRegexList(); + if(!success) { + return 1; // invalid regex error code + } else if(m_eventHandle == NULL) { + return 2; // event init publish error code + } + return 0; } -RsyslogPlugin::RsyslogPlugin(string module_name, string regex_path) { - m_parser = new SyslogParser({}, json::array()); - m_module_name = module_name; - m_regex_path = regex_path; +RsyslogPlugin::RsyslogPlugin(string moduleName, string regexPath) { + m_parser = unique_ptr(new SyslogParser()); + m_moduleName = moduleName; + m_regexPath = regexPath; } diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index fea6cef63e13..e5c5b13fca71 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -2,6 +2,7 @@ #define RSYSLOG_PLUGIN_H #include +#include #include "syslog_parser.h" #include "common/events.h" #include "common/logger.h" @@ -17,16 +18,15 @@ using namespace swss; class RsyslogPlugin { public: - bool onInit(); + int onInit(); bool onMessage(string msg); void run(); - RsyslogPlugin(string module_name, string regex_path); + RsyslogPlugin(string moduleName, string regexPath); private: - SyslogParser* m_parser; - event_handle_t m_event_handle; - string m_regex_path; - string m_module_name; - + unique_ptr m_parser; + event_handle_t m_eventHandle; + string m_regexPath; + string m_moduleName; bool createRegexList(); }; diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index e5b1dca78e58..a025cc029536 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -10,24 +10,19 @@ * */ -bool SyslogParser::parseMessage(string message, string& event_tag, event_params_t& param_map) { - for(long unsigned int i = 0; i < m_regex_list.size(); i++) { - smatch match_results; - vector params = m_regex_list[i]["params"]; - if(!regex_search(message, match_results, m_expressions[i]) || params.size() != match_results.size() - 1) { +bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap) { + for(long unsigned int i = 0; i < m_regexList.size(); i++) { + smatch matchResults; + vector params = m_regexList[i]["params"]; + if(!regex_search(message, matchResults, m_expressions[i]) || params.size() != matchResults.size() - 1) { continue; - } - // found matching regex - event_tag = m_regex_list[i]["tag"]; - transform(params.begin(), params.end(), match_results.begin() + 1, inserter(param_map, param_map.end()), [](string a, string b) { - return make_pair(a,b); - }); - return true; + } + // found matching regex + eventTag = m_regexList[i]["tag"]; + transform(params.begin(), params.end(), matchResults.begin() + 1, inserter(paramMap, paramMap.end()), [](string a, string b) { + return make_pair(a,b); + }); + 
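+        // paramMap now maps each configured param name to its capture group;
+        // matchResults[0] is the full match, hence the begin() + 1 offset above.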
return true; } return false; } - -SyslogParser::SyslogParser(vector expressions, json regex_list) { - m_expressions = expressions; - m_regex_list = regex_list; -} diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index d6b39326b290..53128f1edf99 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -19,10 +19,8 @@ using json = nlohmann::json; class SyslogParser { public: vector m_expressions; - json m_regex_list = json::array(); - - SyslogParser(vector expressions, json regex_list); - bool parseMessage(string message, string& tag, event_params_t& param_dict); + json m_regexList = json::array(); + bool parseMessage(string message, string& tag, event_params_t& paramDict); }; #endif diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 2a8f41e7c840..5893c70f66c0 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -11,51 +11,57 @@ using namespace std; using json = nlohmann::json; TEST(syslog_parser, matching_regex) { - json j_list = json::array(); - vector test_expressions; - string regex_string = "timestamp (.*) message (.*) other_data (.*)"; - json j_test; - j_test["tag"] = "test_tag"; - j_test["regex"] = regex_string; - j_test["params"] = { "timestamp", "message", "other_data" }; - j_list.push_back(j_test); - regex expression(regex_string); - test_expressions.push_back(expression); + json jList = json::array(); + vector testExpressions; + string regexString = "timestamp (.*) message (.*) other_data (.*)"; + json jTest; + jTest["tag"] = "test_tag"; + jTest["regex"] = regexString; + jTest["params"] = { "timestamp", "message", "other_data" }; + jList.push_back(jTest); + regex expression(regexString); + testExpressions.push_back(expression); string tag; - event_params_t param_dict; + event_params_t paramDict; - event_params_t expected_dict; - expected_dict["timestamp"] = "test_timestamp"; - expected_dict["message"] = "test_message"; - expected_dict["other_data"] = "test_data"; + event_params_t expectedDict; + expectedDict["timestamp"] = "test_timestamp"; + expectedDict["message"] = "test_message"; + expectedDict["other_data"] = "test_data"; - SyslogParser* parser = new SyslogParser(test_expressions, j_list); + SyslogParser* parser = new SyslogParser(); + parser->m_expressions = testExpressions; + parser->m_regexList = jList; - bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, param_dict); + bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, paramDict); EXPECT_EQ(true, success); EXPECT_EQ("test_tag", tag); - EXPECT_EQ(expected_dict, param_dict); + EXPECT_EQ(expectedDict, paramDict); delete parser; } TEST(syslog_parser, no_matching_regex) { - json j_list = json::array(); - vector test_expressions; - string regex_string = "no match"; - json j_test; - j_test["tag"] = "test_tag"; - j_test["regex"] = regex_string; - j_test["params"] = vector(); - j_list.push_back(j_test); - regex expression(regex_string); - test_expressions.push_back(expression); + json jList = json::array(); + vector testExpressions; + string regexString = "no match"; + json jTest; + jTest["tag"] = "test_tag"; + jTest["regex"] = regexString; + jTest["params"] = vector(); + jList.push_back(jTest); + regex 
expression(regexString); + testExpressions.push_back(expression); string tag; - event_params_t param_dict; - SyslogParser* parser = new SyslogParser(test_expressions, j_list); - bool success = parser->parseMessage("Test Message", tag, param_dict); + event_params_t paramDict; + + SyslogParser* parser = new SyslogParser(); + parser->m_expressions = testExpressions; + parser->m_regexList = jList; + + bool success = parser->parseMessage("Test Message", tag, paramDict); EXPECT_EQ(false, success); delete parser; } @@ -68,37 +74,37 @@ RsyslogPlugin* createPlugin(string path) { TEST(rsyslog_plugin, onInit_invalidJS0N) { auto plugin = createPlugin("./test_regex_1.rc.json"); - EXPECT_EQ(false, plugin->onInit()); + EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_emptyJSON) { auto plugin = createPlugin("./test_regex_6.rc.json"); - EXPECT_EQ(false, plugin->onInit()); + EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_missingRegex) { auto plugin = createPlugin("./test_regex_3.rc.json"); - EXPECT_EQ(false, plugin->onInit()); + EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_invalidRegex) { auto plugin = createPlugin("./test_regex_4.rc.json"); - EXPECT_EQ(false, plugin->onInit()); + EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onMessage) { auto plugin = createPlugin("./test_regex_2.rc.json"); - EXPECT_EQ(true, plugin->onInit()); + EXPECT_EQ(0, plugin->onInit()); ifstream infile("test_syslogs.txt"); - string log_message; - bool parse_result; - while(infile >> log_message >> parse_result) { - EXPECT_EQ(parse_result, plugin->onMessage(log_message)); + string logMessage; + bool parseResult; + while(infile >> logMessage >> parseResult) { + EXPECT_EQ(parseResult, plugin->onMessage(logMessage)); } infile.close(); delete plugin; @@ -106,13 +112,13 @@ TEST(rsyslog_plugin, onMessage) { TEST(rsyslog_plugin, onMessage_noParams) { auto plugin = createPlugin("./test_regex_5.rc.json"); - EXPECT_EQ(true, plugin->onInit()); + EXPECT_EQ(0, plugin->onInit()); ifstream infile("test_syslogs_2.txt"); - string log_message; - bool parse_result; - while(infile >> log_message >> parse_result) { - EXPECT_EQ(parse_result, plugin->onMessage(log_message)); + string logMessage; + bool parseResult; + while(infile >> logMessage >> parseResult) { + EXPECT_EQ(parseResult, plugin->onMessage(logMessage)); } infile.close(); delete plugin; From ec671c00606c4d6690464283844f4ed83275d7f5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 20 Jun 2022 23:04:24 +0000 Subject: [PATCH 41/66] No logical chenges; little re-order --- src/sonic-eventd/src/eventd.cpp | 115 ++++++++++++++----------- src/sonic-eventd/tests/eventd_ut.cpp | 81 +++++++++-------- src/sonic-eventd/tools/events_tool.cpp | 8 +- 3 files changed, 116 insertions(+), 88 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 3616cd4f549a..558cc2ff447b 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -23,8 +23,6 @@ #define VEC_SIZE(p) ((int)p.size()) -extern int zerrno; - int eventd_proxy::init() { @@ -137,15 +135,26 @@ void capture_service::do_capture() { int rc; - runtime_id_t rid; - sequence_t seq; int block_ms=300; - internal_event_t event; - string source, evt_str; int init_cnt; - void *sock = NULL; - sock = zmq_socket(m_ctx, ZMQ_SUB); + typedef enum { + /* + * In this state every event read is compared with init cache given + * Only new events are saved. 
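+         * An event whose runtime-id/sequence is already covered by the init
+         * cache is treated as a duplicate and skipped (see m_pre_exist_id below).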
+ */ + CAP_STATE_INIT = 0, + + /* In this state, all events read are cached until max limit */ + CAP_STATE_ACTIVE, + + /* Cache has hit max. Hence only save last event for each runime ID */ + CAP_STATE_LAST + } cap_state_t; + + cap_state_t cap_state = CAP_STATE_INIT; + + void *sock = zmq_socket(m_ctx, ZMQ_SUB); RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); rc = zmq_connect(sock, get_config(string(CAPTURE_END_KEY)).c_str()); @@ -165,11 +174,6 @@ capture_service::do_capture() } /* - * Check read events against provided cache until as many events are read. - * to avoid sending duplicates. - * After starting cache service, the caller drains his local cache, which - * could potentially return a new event, that both caller & cache service reads. - * * The cache service connects but defers any reading until caller provides * the startup cache. But all events that arrived since connect, though not read * will be held by ZMQ in its local cache. @@ -183,71 +187,86 @@ capture_service::do_capture() * is empty, do this check. */ init_cnt = (int)m_events.size(); - while((m_ctrl == START_CAPTURE) && !m_pre_exist_id.empty() && (init_cnt > 0)) { - if (zmq_message_read(sock, 0, source, event) == -1) { - RET_ON_ERR(zerrno == EAGAIN, - "0:Failed to read from capture socket"); + /* Read until STOP_CAPTURE */ + while(m_ctrl == START_CAPTURE) { + runtime_id_t rid; + sequence_t seq; + internal_event_t event; + string source, evt_str; + + if ((rc = zmq_message_read(sock, 0, source, event)) != 0) { + /* + * The capture socket captures SUBSCRIBE requests too. + * The messge could contain subscribe filter strings and binary code. + * Empty string with binary code will fail to deserialize. + * Else would fail event validation. + */ + RET_ON_ERR((rc == EAGAIN) || (rc == ERR_MESSAGE_INVALID), + "0:Failed to read from capture socket"); + continue; + } + if (!validate_event(event, rid, seq)) { + continue; } - else if (validate_event(event, rid, seq)) { + serialize(event, evt_str); + + switch(cap_state) { + case CAP_STATE_INIT: + { bool add = true; init_cnt--; - serialize(event, evt_str); pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); if (it != m_pre_exist_id.end()) { if (seq <= it->second) { + /* Duplicate; Later/same seq in cache. */ add = false; } if (seq >= it->second) { + /* new one; This runtime ID need not be checked again */ m_pre_exist_id.erase(it); } } if (add) { m_events.push_back(evt_str); } - } - } - pre_exist_id_t().swap(m_pre_exist_id); - - /* Save until max allowed */ - while((m_ctrl == START_CAPTURE) && (VEC_SIZE(m_events) < m_cache_max)) { + } + if(m_pre_exist_id.empty() || (init_cnt <= 0)) { + /* Init check is no more needed. */ + pre_exist_id_t().swap(m_pre_exist_id); + cap_state = CAP_STATE_ACTIVE; + } + break; - if (zmq_message_read(sock, 0, source, event) == -1) { - RET_ON_ERR(zerrno == EAGAIN, - "1: Failed to read from capture socket"); - } - else if (validate_event(event, rid, seq)) { - serialize(event, evt_str); + case CAP_STATE_ACTIVE: + /* Save until max allowed */ try { m_events.push_back(evt_str); + if (VEC_SIZE(m_events) >= m_cache_max) { + cap_state = CAP_STATE_LAST; + } + break; } - catch (exception& e) + catch (bad_alloc& e) { stringstream ss; ss << e.what(); SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d", ss.str().c_str(), VEC_SIZE(m_events)); - break; + cap_state = CAP_STATE_LAST; + // fall through to save this event in last set. 
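+                    // Intentionally no break here: control falls through to
+                    // CAP_STATE_LAST below, which keeps this event in m_last_events.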
} - } - } - - /* Clear the map, created to ensure memory space available */ - m_last_events.clear(); - m_last_events_init = true; - - /* Save only last event per sender */ - while(m_ctrl == START_CAPTURE) { - - if (zmq_message_read(sock, 0, source, event) == -1) { - RET_ON_ERR(zerrno == EAGAIN, - "2:Failed to read from capture socket"); - } else if (validate_event(event, rid, seq)) { - serialize(event, evt_str); + case CAP_STATE_LAST: + if (!m_last_events_init) { + /* Clear the map, created to ensure memory space available */ + m_last_events.clear(); + m_last_events_init = true; + } m_last_events[rid] = evt_str; + break; } } diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index f7f882dcaf84..678b03739795 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -172,35 +172,13 @@ void run_cap(void *zctx, bool &term, string &read_source, EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); while(!term) { - int rc; -#if 0 - /* - * Don't call zmq_message_read as that is not thread safe - * Subscriber thread is already calling. - */ - zmq_msg_t source, data; - zmq_msg_init(&source); - zmq_msg_init(&data); - rc = zmq_msg_recv(&source, mock_cap, 0); - if (rc != -1) { - rc = zmq_msg_recv(&data, mock_cap, 0); - } -#else - /* - * Intending to make it thread safe. - * Fix, if test fails. Else it is already good. - */ - { - string source; - internal_event_t ev_int; - rc = zmq_message_read(mock_cap, 0, source, ev_int); - } -#endif - if (rc != -1) { + string source; + internal_event_t ev_int; + + if (0 == zmq_message_read(mock_cap, 0, source, ev_int)) { cnt = ++i; } } - zmq_close(mock_cap); } @@ -321,6 +299,10 @@ TEST(eventd, proxy) zmq_close(mock_pub); zmq_ctx_term(zctx); + + /* Provide time for async proxy removal to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + printf("eventd_proxy is tested GOOD\n"); } @@ -362,10 +344,6 @@ TEST(eventd, capture) EXPECT_EQ(0, pxy->init()); /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ - /* - * Block sub from calling zmq_message_read as capture service is calling - * and zmq_message_read crashes on access from more than one thread. - */ thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ @@ -374,6 +352,9 @@ TEST(eventd, capture) /* Expect START_CAPTURE */ EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE)); + /* Initialize the capture */ + EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); + EXPECT_TRUE(init_cache > 1); EXPECT_TRUE((cache_max+3) < ARRAY_SIZE(ldata)); @@ -409,7 +390,6 @@ TEST(eventd, capture) } } - EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start)); /* Init pub connection */ @@ -422,7 +402,7 @@ TEST(eventd, capture) run_pub(mock_pub, wr_source, wr_evts); /* Provide time for async message receive. 
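     * ZMQ delivery is asynchronous, so sleep briefly to let the capture
     * thread drain the published events before stopping it.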
*/ - this_thread::sleep_for(chrono::milliseconds(100)); + this_thread::sleep_for(chrono::milliseconds(200)); /* Stop capture, closes socket & terminates the thread */ EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); @@ -433,7 +413,21 @@ TEST(eventd, capture) /* Read the cache */ EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); +#ifdef DEBUG_TEST + if ((evts_read.size() != evts_expect.size()) || + (last_evts_read.size() != last_evts_exp.size())) { + printf("size: sub_evts_sz=%d sub_evts=%d\n", sub_evts_sz, (int)sub_evts.size()); + printf("init_cache=%d cache_max=%d\n", init_cache, cache_max); + printf("evts_start=%d evts_expect=%d evts_read=%d\n", + (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size()); + printf("last_evts_exp=%d last_evts_read=%d\n", (int)last_evts_exp.size(), + (int)last_evts_read.size()); + } +#endif + + EXPECT_EQ(evts_read.size(), evts_expect.size()); EXPECT_EQ(evts_read, evts_expect); + EXPECT_EQ(last_evts_read.size(), last_evts_exp.size()); EXPECT_EQ(last_evts_read, last_evts_exp); delete pxy; @@ -446,6 +440,10 @@ TEST(eventd, capture) zmq_close(mock_pub); zmq_ctx_term(zctx); + + /* Provide time for async proxy removal to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + printf("Capture TEST completed\n"); } @@ -486,10 +484,6 @@ TEST(eventd, captureCacheMax) EXPECT_EQ(0, pxy->init()); /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ - /* - * Block sub from calling zmq_message_read as capture service is calling - * and zmq_message_read crashes on access from more than one thread. - */ thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ @@ -551,6 +545,17 @@ TEST(eventd, captureCacheMax) /* Read the cache */ EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); +#ifdef DEBUG_TEST + if ((evts_read.size() != evts_expect.size()) || + !last_evts_read.empty()) { + printf("size: sub_evts_sz=%d sub_evts=%d\n", sub_evts_sz, (int)sub_evts.size()); + printf("init_cache=%d cache_max=%d\n", init_cache, cache_max); + printf("evts_start=%d evts_expect=%d evts_read=%d\n", + (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size()); + printf("last_evts_read=%d\n", (int)last_evts_read.size()); + } +#endif + EXPECT_EQ(evts_read, evts_expect); EXPECT_TRUE(last_evts_read.empty()); @@ -564,6 +569,10 @@ TEST(eventd, captureCacheMax) zmq_close(mock_pub); zmq_ctx_term(zctx); + + /* Provide time for async proxy removal to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + printf("Capture TEST with matchinhg cache-max completed\n"); } diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 140ad9fc5619..86e12b368ccd 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -109,9 +109,9 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt int missed_cnt=-1; int rc = event_receive(h, key, params, missed_cnt); - if (rc == -1) { - ASSERT(event_last_error() == EAGAIN, "Failed to receive rc=%d err=%d\n", - rc, event_last_error()); + if (rc != 0) { + ASSERT(rc == EAGAIN, "Failed to receive rc=%d index=%d\n", + rc, index); continue; } ASSERT(!key.empty(), "received EMPTY key"); @@ -253,7 +253,7 @@ do_send(const string infile, int cnt, int pause) } int rc = event_publish(h, evt.tag, evt.params.empty() ? 
NULL : &evt.params); - ASSERT(rc == 0, "Failed to publish index=%d", index); + ASSERT(rc == 0, "Failed to publish index=%d rc=%d", index, rc); if ((cnt > 0) && (--cnt == 0)) { /* set to termninate */ From ae60e1b627bf49f0a3390ef7f705ab3e18dbf916 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 21 Jun 2022 15:10:03 +0000 Subject: [PATCH 42/66] per review comments; No logical code change --- src/sonic-eventd/src/eventd.cpp | 30 ++++++++++++++-------------- src/sonic-eventd/tests/eventd_ut.cpp | 4 ++-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 558cc2ff447b..18ebfaf182f0 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -214,24 +214,24 @@ capture_service::do_capture() switch(cap_state) { case CAP_STATE_INIT: { - bool add = true; - init_cnt--; - pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); - - if (it != m_pre_exist_id.end()) { - if (seq <= it->second) { - /* Duplicate; Later/same seq in cache. */ - add = false; + bool add = true; + init_cnt--; + pre_exist_id_t::iterator it = m_pre_exist_id.find(rid); + + if (it != m_pre_exist_id.end()) { + if (seq <= it->second) { + /* Duplicate; Later/same seq in cache. */ + add = false; + } + if (seq >= it->second) { + /* new one; This runtime ID need not be checked again */ + m_pre_exist_id.erase(it); + } } - if (seq >= it->second) { - /* new one; This runtime ID need not be checked again */ - m_pre_exist_id.erase(it); + if (add) { + m_events.push_back(evt_str); } } - if (add) { - m_events.push_back(evt_str); - } - } if(m_pre_exist_id.empty() || (init_cnt <= 0)) { /* Init check is no more needed. */ pre_exist_id_t().swap(m_pre_exist_id); diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 678b03739795..ab51758aa214 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -606,7 +606,7 @@ TEST(eventd, service) * It runs proxy & capture service * It uses its own zmq context */ - thread thrSvc(&run_eventd_service); + thread thread_service(&run_eventd_service); /* Need client side service to interact with server side */ EXPECT_EQ(0, service.init_client(zctx)); @@ -641,7 +641,7 @@ TEST(eventd, service) service.close_service(); - thrSvc.join(); + thread_service.join(); zmq_ctx_term(zctx); printf("Service TEST completed\n"); From 9393101862d5bb56710b40277711b1fc4727dbfa Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 21 Jun 2022 21:13:05 +0000 Subject: [PATCH 43/66] leafref added per review comments --- .../yang-events/sonic-events-dhcp_relay.yang | 4 ++++ src/sonic-yang-models/yang-events/sonic-events-swss.yang | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang index a580afdf3a1a..b4a07555a21a 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang +++ b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang @@ -46,6 +46,10 @@ module sonic-events-dhcp-relay { description "Name of the i/f discarding"; } + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + uses evtcmn:sonic-events-cmn; } diff --git a/src/sonic-yang-models/yang-events/sonic-events-swss.yang b/src/sonic-yang-models/yang-events/sonic-events-swss.yang index 2b5bbca3031f..50ee7471b829 100644 --- a/src/sonic-yang-models/yang-events/sonic-events-swss.yang +++ 
b/src/sonic-yang-models/yang-events/sonic-events-swss.yang @@ -52,6 +52,10 @@ module sonic-events-swss { description "Interface name"; } + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + leaf status { type enumeration { enum "up"; @@ -77,6 +81,10 @@ module sonic-events-swss { description "Interface name"; } + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + leaf queue_index { type uint8; } From acfd64c6cd65ca4f4be2c13522d23a8a8b9d28e4 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 21 Jun 2022 22:18:43 +0000 Subject: [PATCH 44/66] per review comments; No logical code changes --- src/sonic-eventd/src/eventd.cpp | 26 +++++++++++++------------- src/sonic-eventd/tests/eventd_ut.cpp | 2 +- src/sonic-eventd/tools/events_tool.cpp | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 18ebfaf182f0..6a008da22e88 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -448,21 +448,21 @@ run_eventd_service() } { - int sz = VEC_SIZE(capture_fifo_events) < READ_SET_SIZE ? - VEC_SIZE(capture_fifo_events) : READ_SET_SIZE; - - if (sz != 0) { - auto it = std::next(capture_fifo_events.begin(), sz); - move(capture_fifo_events.begin(), capture_fifo_events.end(), - back_inserter(resp_data)); - - if (sz == VEC_SIZE(capture_fifo_events)) { - event_serialized_lst_t().swap(capture_fifo_events); - } else { - capture_fifo_events.erase(capture_fifo_events.begin(), it); + int sz = VEC_SIZE(capture_fifo_events) < READ_SET_SIZE ? + VEC_SIZE(capture_fifo_events) : READ_SET_SIZE; + + if (sz != 0) { + auto it = std::next(capture_fifo_events.begin(), sz); + move(capture_fifo_events.begin(), capture_fifo_events.end(), + back_inserter(resp_data)); + + if (sz == VEC_SIZE(capture_fifo_events)) { + event_serialized_lst_t().swap(capture_fifo_events); + } else { + capture_fifo_events.erase(capture_fifo_events.begin(), it); + } } } - } break; diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index ab51758aa214..101944fbeb8d 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -238,7 +238,7 @@ TEST(eventd, proxy) { debug_on(); - printf("PROxy TEST started\n"); + printf("Proxy TEST started\n"); bool term_sub = false; bool term_cap = false; string rd_csource, rd_source, wr_source("hello"); diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 86e12b368ccd..975b8b7fb9ea 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -115,7 +115,7 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt continue; } ASSERT(!key.empty(), "received EMPTY key"); - ASSERT(missed_cnt >= 0, "MIssed count uninitialized"); + ASSERT(missed_cnt >= 0, "Missed count uninitialized"); total_missed += missed_cnt; From 8a4c59387a8fc347647713ee1e5b18a2b3b17db4 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Tue, 5 Jul 2022 15:20:40 +0000 Subject: [PATCH 45/66] Added LIBSWSSCOMMON in DEPENDS --- rules/telemetry.mk | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rules/telemetry.mk b/rules/telemetry.mk index 392ed4f28680..81b5b6bcc11f 100644 --- a/rules/telemetry.mk +++ b/rules/telemetry.mk @@ -2,6 +2,8 @@ SONIC_TELEMETRY = sonic-telemetry_0.1_$(CONFIGURED_ARCH).deb $(SONIC_TELEMETRY)_SRC_PATH = $(SRC_PATH)/sonic-telemetry 
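# Presumably needed because sonic-telemetry now builds against the events
# subscribe API provided by libswsscommon (see the eventd changes above).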
-$(SONIC_TELEMETRY)_DEPENDS = $(SONIC_MGMT_COMMON) $(SONIC_MGMT_COMMON_CODEGEN) -$(SONIC_TELEMETRY)_RDEPENDS = +$(SONIC_TELEMETRY)_DEPENDS = $(SONIC_MGMT_COMMON) $(SONIC_MGMT_COMMON_CODEGEN) \ + $(LIBSAIREDIS_DEV) $(LIBSAIMETADATA_DEV) $(LIBSWSSCOMMON_DEV) $(LIBSWSSCOMMON) +$(SONIC_TELEMETRY)_RDEPENDS = $(LIBSAIREDIS) $(LIBSAIMETADATA) \ + $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) SONIC_DPKG_DEBS += $(SONIC_TELEMETRY) From 266c1619a0f1971584479c8f2eba6789488e31c3 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 8 Jul 2022 00:03:35 +0000 Subject: [PATCH 46/66] removed unused eventd.sh --- files/build_templates/sonic_debian_extension.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 8ba54f6a36f4..d3caf046ccf9 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -813,7 +813,6 @@ sudo LANG=C cp $SCRIPTS_DIR/bgp.sh $FILESYSTEM_ROOT/usr/local/bin/bgp.sh sudo LANG=C cp $SCRIPTS_DIR/teamd.sh $FILESYSTEM_ROOT/usr/local/bin/teamd.sh sudo LANG=C cp $SCRIPTS_DIR/lldp.sh $FILESYSTEM_ROOT/usr/local/bin/lldp.sh sudo LANG=C cp $SCRIPTS_DIR/radv.sh $FILESYSTEM_ROOT/usr/local/bin/radv.sh -sudo LANG=C cp $SCRIPTS_DIR/eventd.sh $FILESYSTEM_ROOT/usr/local/bin/eventd.sh sudo LANG=C cp $SCRIPTS_DIR/asic_status.sh $FILESYSTEM_ROOT/usr/local/bin/asic_status.sh sudo LANG=C cp $SCRIPTS_DIR/asic_status.py $FILESYSTEM_ROOT/usr/local/bin/asic_status.py From 202589f97e3554e96065b8f8600710c455ede8c7 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 8 Jul 2022 23:31:36 +0000 Subject: [PATCH 47/66] fix compile errors in rsyslog; adapt to an API change in tools --- rules/docker-eventd.mk | 2 ++ src/sonic-eventd/Makefile | 6 ++--- .../rsyslog_plugin/rsyslog_plugin.cpp | 6 ++--- .../rsyslog_plugin/rsyslog_plugin.h | 4 ++-- src/sonic-eventd/rsyslog_plugin/subdir.mk | 4 ++-- .../rsyslog_plugin/syslog_parser.cpp | 2 +- .../rsyslog_plugin/syslog_parser.h | 4 ++-- src/sonic-eventd/tools/events_tool.cpp | 24 +++++++++---------- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk index 304f295e2a4b..15aca80df2db 100644 --- a/rules/docker-eventd.mk +++ b/rules/docker-eventd.mk @@ -36,3 +36,5 @@ $(DOCKER_EVENTD)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro SONIC_BULLSEYE_DOCKERS += $(DOCKER_EVENTD) SONIC_BULLSEYE_DBG_DOCKERS += $(DOCKER_EVENTD_DBG) + +SONIC_COPY_FILES += $((SONIC_EVENTD)_SRC_PATH)/rsyslog_plugin diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index c402bc0e68a5..ed54ea170e7d 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -2,8 +2,8 @@ RM := rm -rf EVENTD_TARGET := eventd EVENTD_TEST := tests/tests EVENTD_TOOL := tools/events_tool -RSYSLOG-PLUGIN_TARGET := rsyslog_plugin -RSYSLOG-PLUGIN_TEST: rsyslog_plugin_tests/tests +RSYSLOG-PLUGIN_TARGET := rsyslog_plugin/rsyslog_plugin +RSYSLOG-PLUGIN_TEST := rsyslog_plugin_tests/tests CP := cp MKDIR := mkdir CC := g++ @@ -26,7 +26,7 @@ endif -include rsyslog_plugin/subdir.mk -include rsyslog_plugin_tests/subdir.mk -all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin rsyslog-plugin-tests +all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin sonic-eventd: $(OBJS) @echo 'Building target: $@' diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index ca50f465a79b..854570c57b7e 100644 --- 
a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -3,7 +3,7 @@ #include #include #include "rsyslog_plugin.h" -#include "common/json.hpp" +#include "json.hpp" using json = nlohmann::json; @@ -16,7 +16,7 @@ bool RsyslogPlugin::onMessage(string msg) { } else { int returnCode = event_publish(m_eventHandle, tag, ¶mDict); if (returnCode != 0) { - SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s. last thrown event error: %d\n", tag.c_str(), event_last_error()); + SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s.\n", tag.c_str()); return false; } return true; @@ -64,7 +64,7 @@ bool RsyslogPlugin::createRegexList() { return true; } -[[noreturn]] void RsyslogPlugin::run() { +void RsyslogPlugin::run() { while(true) { string line; getline(cin, line); diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index e5c5b13fca71..7a1082998c74 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -4,8 +4,8 @@ #include #include #include "syslog_parser.h" -#include "common/events.h" -#include "common/logger.h" +#include "events.h" +#include "logger.h" using namespace std; using namespace swss; diff --git a/src/sonic-eventd/rsyslog_plugin/subdir.mk b/src/sonic-eventd/rsyslog_plugin/subdir.mk index e5af1085da42..7e38de4b902f 100644 --- a/src/sonic-eventd/rsyslog_plugin/subdir.mk +++ b/src/sonic-eventd/rsyslog_plugin/subdir.mk @@ -9,5 +9,5 @@ rsyslog_plugin/%.o: rsyslog_plugin/%.cpp @echo 'Building file: $<' @echo 'Invoking: GCC C++ Compiler' $(CC) -D__FILENAME__="$(subst rsyslog_plugin/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$(@)" "$<" - @echo 'Finished building: $< - '@echo ' ' + @echo 'Finished building: $<' + @echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index a025cc029536..0ab1f7c9b103 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -1,6 +1,6 @@ #include #include "syslog_parser.h" -#include "common/logger.h" +#include "logger.h" /** * Parses syslog message and returns structured event diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index 53128f1edf99..e0e9252aaa63 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -4,8 +4,8 @@ #include #include #include -#include "common/json.hpp" -#include "common/events.h" +#include "json.hpp" +#include "events.h" using namespace std; using json = nlohmann::json; diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 975b8b7fb9ea..4e42591d4596 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -103,24 +103,22 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt ASSERT(h != NULL, "Failed to get subscriber handle"); while(!term_receive) { - string key; - event_params_t params; - map_str_str_t evt; - int missed_cnt=-1; + event_receive_op_t evt; + map_str_str_t evtOp; - int rc = event_receive(h, key, params, missed_cnt); - if (rc != 0) { - ASSERT(rc == EAGAIN, "Failed to receive rc=%d index=%d\n", - rc, index); + evt = event_receive(h); + if (evt.rc != 0) { + ASSERT(evt.rc == EAGAIN, "Failed to receive rc=%d 
index=%d\n", + evt.rc, index); continue; } - ASSERT(!key.empty(), "received EMPTY key"); - ASSERT(missed_cnt >= 0, "Missed count uninitialized"); + ASSERT(!evt.key.empty(), "received EMPTY key"); + ASSERT(evt.missed_cnt >= 0, "Missed count uninitialized"); - total_missed += missed_cnt; + total_missed += evt.missed_cnt; - evt[key] = t_map_to_str(params); - (*fp) << t_map_to_str(evt) << "\n"; + evtOp[evt.key] = t_map_to_str(evt.params); + (*fp) << t_map_to_str(evtOp) << "\n"; fp->flush(); if ((++index % PRINT_CHUNK_SZ) == 0) { From b75607b954d680dbbd3772182acb8e977494f865 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Sun, 10 Jul 2022 23:00:52 +0000 Subject: [PATCH 48/66] first cut: Integrating with BGP container --- dockers/docker-fpm-frr/Dockerfile.j2 | 6 ++++++ dockers/docker-fpm-frr/bgpcfgd_regex.json | 13 +++++++++++++ dockers/docker-fpm-frr/events_info.json | 10 ++++++++++ files/build_templates/rsyslog_plugin.conf.j2 | 19 +++++++++++++++++++ .../build_templates/sonic_debian_extension.j2 | 4 ++++ rules/docker-config-engine-bullseye.mk | 5 ++++- rules/docker-config-engine-buster.mk | 1 + rules/docker-eventd.mk | 8 +++++++- rules/scripts.mk | 4 ++++ 9 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 dockers/docker-fpm-frr/bgpcfgd_regex.json create mode 100644 dockers/docker-fpm-frr/events_info.json create mode 100644 files/build_templates/rsyslog_plugin.conf.j2 diff --git a/dockers/docker-fpm-frr/Dockerfile.j2 b/dockers/docker-fpm-frr/Dockerfile.j2 index f885180c37d4..2290bc0c5915 100644 --- a/dockers/docker-fpm-frr/Dockerfile.j2 +++ b/dockers/docker-fpm-frr/Dockerfile.j2 @@ -56,9 +56,15 @@ COPY ["TSC", "/usr/bin/TSC"] COPY ["TS", "/usr/bin/TS"] COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] COPY ["zsocket.sh", "/usr/bin/"] +COPY ["*.json", "/etc/rsyslog.d/"] +COPY ["files/rsyslog_plugin.conf.j2", "/etc/rsyslog.d/"] RUN chmod a+x /usr/bin/TSA && \ chmod a+x /usr/bin/TSB && \ chmod a+x /usr/bin/TSC && \ chmod a+x /usr/bin/zsocket.sh +RUN j2 -f json /etc/rsyslog.d/rsyslog_plugin.conf.j2 /etc/rsyslog.d/events_info.json > /etc/rsyslog.d/bgp_events.conf +RUN rm -f /etc/rsyslog.d/rsyslog_plugin.conf.j2* +RUN rm -f /etc/rsyslog.d/events_info.json* + ENTRYPOINT ["/usr/bin/docker_init.sh"] diff --git a/dockers/docker-fpm-frr/bgpcfgd_regex.json b/dockers/docker-fpm-frr/bgpcfgd_regex.json new file mode 100644 index 000000000000..7bc361c5e21d --- /dev/null +++ b/dockers/docker-fpm-frr/bgpcfgd_regex.json @@ -0,0 +1,13 @@ +[ + { + "tag": "bgp_down", + "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .down.", + "params": [ "peer_ip" ] + }, + { + "tag": "bgp_up", + "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). 
admin state is set to .up.", + "params": [ "peer_ip" ] + } +] + diff --git a/dockers/docker-fpm-frr/events_info.json b/dockers/docker-fpm-frr/events_info.json new file mode 100644 index 000000000000..fb6fa9bdc6e4 --- /dev/null +++ b/dockers/docker-fpm-frr/events_info.json @@ -0,0 +1,10 @@ +{ + "yang_module": "sonic-events-bgp", + "proclist": [ + { + "name": "bgpcfgd", + "parse_json": "bgpcfgd_regex.json" + } + ] +} + diff --git a/files/build_templates/rsyslog_plugin.conf.j2 b/files/build_templates/rsyslog_plugin.conf.j2 new file mode 100644 index 000000000000..ec19c62a78f6 --- /dev/null +++ b/files/build_templates/rsyslog_plugin.conf.j2 @@ -0,0 +1,19 @@ +## rsyslog-plugin for streaming telemetry via gnmi + + + +template(name="prog_msg" type="list") { + property(name="msg") + constant(value="\n") +} + +$ModLoad omprog + +{% for proc in proclist %} +if re_match($programname, "{{ proc.name }}") then { + action(type="omprog" + binary="/usr/share/sonic/scripts/rsyslog_plugin -r /etc/rsyslog.d/{{ proc.parse_json }} -m {{ yang_module }}" + output="/var/log/rsyslog_plugin.log" + template="prog_msg") +} +{% endfor %} diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index d3caf046ccf9..35187312b79d 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -763,6 +763,10 @@ sudo bash -c "echo { > $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_name {% endfor %} sudo bash -c "echo } >> $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_names.json" +# copy rsyslog plugin binary for use by all dockers that use plugin to publish events. +sudo mkdir -p ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS} +sudo cp ${files_path}/rsyslog_plugin ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS}/ + {% for script in installer_start_scripts.split(' ') -%} if [ -f $TARGET_MACHINE"_{{script}}" ]; then sudo cp $TARGET_MACHINE"_{{script}}" $FILESYSTEM_ROOT/usr/bin/{{script}} diff --git a/rules/docker-config-engine-bullseye.mk b/rules/docker-config-engine-bullseye.mk index 0c4aad67a8d7..727449ce2250 100644 --- a/rules/docker-config-engine-bullseye.mk +++ b/rules/docker-config-engine-bullseye.mk @@ -8,7 +8,8 @@ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DEPENDS += $(LIBSWSSCOMMON) \ $(LIBYANG_CPP) \ $(LIBYANG_PY3) \ $(PYTHON3_SWSSCOMMON) \ - $(SONIC_DB_CLI) + $(SONIC_DB_CLI) \ + $(SONIC_EVENTD) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SWSSSDK_PY3) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(SONIC_YANG_MGMT_PY3) \ @@ -16,6 +17,8 @@ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_LOAD_DOCKERS += $(DOCKER_BASE_BULLSEYE) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(SWSS_VARS_TEMPLATE) +$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(RSYSLOG_PLUGIN_CONF_J2) +$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $($(DOCKER_EVENTD)_PLUGIN) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $($(SONIC_CTRMGRD)_CONTAINER_SCRIPT) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS = $($(DOCKER_BASE_BULLSEYE)_DBG_DEPENDS) \ diff --git a/rules/docker-config-engine-buster.mk b/rules/docker-config-engine-buster.mk index 079fc6dd074c..1cc9d416009d 100644 --- a/rules/docker-config-engine-buster.mk +++ b/rules/docker-config-engine-buster.mk @@ -16,6 +16,7 @@ $(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += 
$(SONIC_CONFIG_ENGINE_PY3) $(DOCKER_CONFIG_ENGINE_BUSTER)_LOAD_DOCKERS += $(DOCKER_BASE_BUSTER) $(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $(SWSS_VARS_TEMPLATE) +$(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $(RSYSLOG_PLUGIN_CONF_J2) $(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $($(SONIC_CTRMGRD)_CONTAINER_SCRIPT) $(DOCKER_CONFIG_ENGINE_BUSTER)_DBG_DEPENDS = $($(DOCKER_BASE_BUSTER)_DBG_DEPENDS) \ diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk index 15aca80df2db..5a546aca1cb5 100644 --- a/rules/docker-eventd.mk +++ b/rules/docker-eventd.mk @@ -37,4 +37,10 @@ $(DOCKER_EVENTD)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro SONIC_BULLSEYE_DOCKERS += $(DOCKER_EVENTD) SONIC_BULLSEYE_DBG_DOCKERS += $(DOCKER_EVENTD_DBG) -SONIC_COPY_FILES += $((SONIC_EVENTD)_SRC_PATH)/rsyslog_plugin +$(DOCKER_EVENTD)_FILESPATH = $($(SONIC_EVENTD)_SRC_PATH)/rsyslog_plugin + +$(DOCKER_EVENTD)_PLUGIN = rsyslog_plugin +$($(DOCKER_EVENTD)_PLUGIN)_PATH = $($(DOCKER_EVENTD)_FILESPATH) + +SONIC_COPY_FILES += $($(DOCKER_EVENTD)_PLUGIN) + diff --git a/rules/scripts.mk b/rules/scripts.mk index ce6a8eb90025..12919d520b09 100644 --- a/rules/scripts.mk +++ b/rules/scripts.mk @@ -32,6 +32,9 @@ $(SWSS_VARS_TEMPLATE)_PATH = files/build_templates COPP_CONFIG_TEMPLATE = copp_cfg.j2 $(COPP_CONFIG_TEMPLATE)_PATH = files/image_config/copp +RSYSLOG_PLUGIN_CONF_J2 = rsyslog_plugin.conf.j2 +$(RSYSLOG_PLUGIN_CONF_J2)_PATH = files/build_templates + SONIC_COPY_FILES += $(CONFIGDB_LOAD_SCRIPT) \ $(ARP_UPDATE_SCRIPT) \ $(ARP_UPDATE_VARS_TEMPLATE) \ @@ -42,4 +45,5 @@ SONIC_COPY_FILES += $(CONFIGDB_LOAD_SCRIPT) \ $(SYSCTL_NET_CONFIG) \ $(UPDATE_CHASSISDB_CONFIG_SCRIPT) \ $(SWSS_VARS_TEMPLATE) \ + $(RSYSLOG_PLUGIN_CONF_J2) \ $(COPP_CONFIG_TEMPLATE) From a5f45b805ef3f79de328c3a6fcc7fa650ad72675 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 11 Jul 2022 03:02:22 +0000 Subject: [PATCH 49/66] add docker mountpath --- files/build_templates/docker_image_ctl.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/files/build_templates/docker_image_ctl.j2 b/files/build_templates/docker_image_ctl.j2 index f0a1dcccbaae..1e0f7fd04912 100644 --- a/files/build_templates/docker_image_ctl.j2 +++ b/files/build_templates/docker_image_ctl.j2 @@ -500,6 +500,7 @@ start() { {%- endif -%} {%- if docker_container_name == "bgp" %} -v /etc/sonic/frr/$DEV:/etc/frr:rw \ + -v /usr/share/sonic/scripts:/usr/share/sonic/scripts:ro \ {%- endif %} {%- if docker_container_name == "database" %} $DB_OPT \ From 9423d0d0781a1470b66727a09ffcda2599829ebd Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 11 Jul 2022 16:54:52 +0000 Subject: [PATCH 50/66] minor update per discussion --- dockers/docker-fpm-frr/{bgpcfgd_regex.json => bgp_regex.json} | 0 dockers/docker-fpm-frr/events_info.json | 4 ++-- files/build_templates/rsyslog_plugin.conf.j2 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename dockers/docker-fpm-frr/{bgpcfgd_regex.json => bgp_regex.json} (100%) diff --git a/dockers/docker-fpm-frr/bgpcfgd_regex.json b/dockers/docker-fpm-frr/bgp_regex.json similarity index 100% rename from dockers/docker-fpm-frr/bgpcfgd_regex.json rename to dockers/docker-fpm-frr/bgp_regex.json diff --git a/dockers/docker-fpm-frr/events_info.json b/dockers/docker-fpm-frr/events_info.json index fb6fa9bdc6e4..66fa9a727ae2 100644 --- a/dockers/docker-fpm-frr/events_info.json +++ b/dockers/docker-fpm-frr/events_info.json @@ -2,8 +2,8 @@ "yang_module": "sonic-events-bgp", "proclist": [ { - "name": "bgpcfgd", - "parse_json": "bgpcfgd_regex.json" + "name": "bgp", + "parse_json": 
"bgp_regex.json" } ] } diff --git a/files/build_templates/rsyslog_plugin.conf.j2 b/files/build_templates/rsyslog_plugin.conf.j2 index ec19c62a78f6..6dc46552e806 100644 --- a/files/build_templates/rsyslog_plugin.conf.j2 +++ b/files/build_templates/rsyslog_plugin.conf.j2 @@ -10,7 +10,7 @@ template(name="prog_msg" type="list") { $ModLoad omprog {% for proc in proclist %} -if re_match($programname, "{{ proc.name }}") then { +if startswith($programname, "{{ proc.name }}") then { action(type="omprog" binary="/usr/share/sonic/scripts/rsyslog_plugin -r /etc/rsyslog.d/{{ proc.parse_json }} -m {{ yang_module }}" output="/var/log/rsyslog_plugin.log" From e5be04f324d6ee82d96ff4a520514e79497b8e0c Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Mon, 11 Jul 2022 23:53:40 +0000 Subject: [PATCH 51/66] corrected per YANG schema --- dockers/docker-fpm-frr/bgp_regex.json | 11 +++-------- files/build_templates/rsyslog_plugin.conf.j2 | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/dockers/docker-fpm-frr/bgp_regex.json b/dockers/docker-fpm-frr/bgp_regex.json index 7bc361c5e21d..aa27824386d1 100644 --- a/dockers/docker-fpm-frr/bgp_regex.json +++ b/dockers/docker-fpm-frr/bgp_regex.json @@ -1,13 +1,8 @@ [ { - "tag": "bgp_down", - "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .down.", - "params": [ "peer_ip" ] - }, - { - "tag": "bgp_up", - "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .up.", - "params": [ "peer_ip" ] + "tag": "bgp-state", + "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .(up|down).", + "params": [ "peer_ip", "status" ] } ] diff --git a/files/build_templates/rsyslog_plugin.conf.j2 b/files/build_templates/rsyslog_plugin.conf.j2 index 6dc46552e806..ec19c62a78f6 100644 --- a/files/build_templates/rsyslog_plugin.conf.j2 +++ b/files/build_templates/rsyslog_plugin.conf.j2 @@ -10,7 +10,7 @@ template(name="prog_msg" type="list") { $ModLoad omprog {% for proc in proclist %} -if startswith($programname, "{{ proc.name }}") then { +if re_match($programname, "{{ proc.name }}") then { action(type="omprog" binary="/usr/share/sonic/scripts/rsyslog_plugin -r /etc/rsyslog.d/{{ proc.parse_json }} -m {{ yang_module }}" output="/var/log/rsyslog_plugin.log" From 13a7f47763fef8132c747e6e63c1cf3923d1929a Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 13 Jul 2022 17:47:24 +0000 Subject: [PATCH 52/66] Enabled cacheing to kick off right upon startup --- src/sonic-eventd/src/eventd.cpp | 21 +++++- src/sonic-eventd/src/eventd.h | 1 + src/sonic-eventd/tests/eventd_ut.cpp | 99 ++++++++++++++++++---------- 3 files changed, 85 insertions(+), 36 deletions(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 6a008da22e88..3c754fa7c65d 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -103,6 +103,10 @@ validate_event(const internal_event_t &event, runtime_id_t &rid, sequence_t &seq } +/* + * Initialize cache with set of events provided. 
+ * Events read by cache service will be appended + */ void capture_service::init_capture_cache(const event_serialized_lst_t &lst) { @@ -137,6 +141,8 @@ capture_service::do_capture() int rc; int block_ms=300; int init_cnt; + event_handle_t subs_handle = NULL; + void *sock = NULL; typedef enum { /* @@ -154,7 +160,10 @@ capture_service::do_capture() cap_state_t cap_state = CAP_STATE_INIT; - void *sock = zmq_socket(m_ctx, ZMQ_SUB); + subs_handle = events_init_subscriber(); + RET_ON_ERR(subs_handle != NULL, "failed to subscribe to all"); + + sock = zmq_socket(m_ctx, ZMQ_SUB); RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); rc = zmq_connect(sock, get_config(string(CAPTURE_END_KEY)).c_str()); @@ -275,6 +284,7 @@ capture_service::do_capture() * Capture stop will close the socket which fail the read * and hence bail out. */ + events_deinit_subscriber(subs_handle); zmq_close(sock); m_cap_run = false; return; @@ -383,6 +393,15 @@ run_eventd_service() RET_ON_ERR(service.init_server(zctx) == 0, "Failed to init service"); + /* + * Start cache service, right upon eventd starts so as not to lose + * events until telemetry starts. + * Telemetry will send a stop & collect cache upon startup + */ + capture = new capture_service(zctx, cache_max); + RET_ON_ERR(capture->set_control(INIT_CAPTURE) == 0, "Failed to init capture"); + RET_ON_ERR(capture->set_control(START_CAPTURE) == 0, "Failed to start capture"); + while(code != EVENT_EXIT) { int resp = -1; event_serialized_lst_t req_data, resp_data; diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h index b5a7f9a2ab5e..6273497b0cc2 100644 --- a/src/sonic-eventd/src/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -2,6 +2,7 @@ * Header file for eventd daemon */ #include "events_service.h" +#include "events.h" typedef map last_events_t; diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 101944fbeb8d..227d7a9b78e0 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -212,6 +212,9 @@ void *init_pub(void *zctx) EXPECT_TRUE(NULL != mock_pub); EXPECT_EQ(0, zmq_connect(mock_pub, get_config(XSUB_END_KEY).c_str())); + /* Provide time for async connect to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + return mock_pub; } @@ -264,9 +267,6 @@ TEST(eventd, proxy) /* Init pub connection */ void *mock_pub = init_pub(zctx); - /* Provide time for async connect to complete */ - this_thread::sleep_for(chrono::milliseconds(100)); - EXPECT_TRUE(5 < ARRAY_SIZE(ldata)); for(int i=0; i<5; ++i) { @@ -312,10 +312,6 @@ TEST(eventd, capture) printf("Capture TEST started\n"); debug_on(); - /* - * Need to run subscriber; Else publisher would skip publishing - * in the absence of any subscriber. - */ bool term_sub = false; string sub_source; int sub_evts_sz = 0; @@ -345,7 +341,7 @@ TEST(eventd, capture) /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); - + /* Create capture service */ capture_service *pcap = new capture_service(zctx, cache_max); @@ -370,6 +366,8 @@ TEST(eventd, capture) /* * Collect events to publish for capture to cache * re-publishing some events sent in cache. + * Hence i=1, when first init_cache events are already + * in crash. 
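+     * (that is, already in the init cache, so capture must de-duplicate them).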
*/ for(int i=1; i < ARRAY_SIZE(ldata); ++i) { internal_event_t ev(create_ev(ldata[i])); @@ -395,9 +393,6 @@ TEST(eventd, capture) /* Init pub connection */ void *mock_pub = init_pub(zctx); - /* Provide time for async connect to complete */ - this_thread::sleep_for(chrono::milliseconds(200)); - /* Publish events from 1 to all. */ run_pub(mock_pub, wr_source, wr_evts); @@ -527,9 +522,6 @@ TEST(eventd, captureCacheMax) /* Init pub connection */ void *mock_pub = init_pub(zctx); - /* Provide time for async connect to complete */ - this_thread::sleep_for(chrono::milliseconds(200)); - /* Publish events from 1 to all. */ run_pub(mock_pub, wr_source, wr_evts); @@ -590,12 +582,7 @@ TEST(eventd, service) printf("Service TEST started\n"); debug_on(); - /* capture related */ - int init_cache = 4; /* provided along with start capture */ - /* startup strings; expected list & read list from capture */ - event_serialized_lst_t evts_start, evts_read; - event_service service; void *zctx = zmq_ctx_new(); @@ -605,37 +592,79 @@ TEST(eventd, service) * Start the eventd server side service * It runs proxy & capture service * It uses its own zmq context + * It starts to capture too. */ thread thread_service(&run_eventd_service); /* Need client side service to interact with server side */ EXPECT_EQ(0, service.init_client(zctx)); - EXPECT_EQ(-1, service.cache_stop()); + { + /* eventd_service starts cache too; Test this caching */ + /* Init pub connection */ + void *mock_pub = init_pub(zctx); - EXPECT_TRUE(init_cache > 1); + internal_events_lst_t wr_evts; + int wr_sz = 2; + string wr_source("hello"); - /* Collect few serailized strings of events for startup cache */ - for(int i=0; i < init_cache; ++i) { - internal_event_t ev(create_ev(ldata[i])); - string evt_str; - serialize(ev, evt_str); - evts_start.push_back(evt_str); + /* Test service startup caching */ + event_serialized_lst_t evts_start, evts_read; + + for(int i=0; i 1); + + /* Collect few serailized strings of events for startup cache */ + for(int i=0; i < init_cache; ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + serialize(ev, evt_str); + evts_start.push_back(evt_str); + } + + + EXPECT_EQ(0, service.cache_init()); + EXPECT_EQ(0, service.cache_start(evts_start)); + + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Stop capture, closes socket & terminates the thread */ + EXPECT_EQ(0, service.cache_stop()); + + /* Read the cache */ + EXPECT_EQ(0, service.cache_read(evts_read)); + + EXPECT_EQ(evts_read, evts_start); + } EXPECT_EQ(0, service.send_recv(EVENT_EXIT)); From 91d763328c6519ab8a8cc8da9f8caf163ec4e88a Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 13 Jul 2022 18:20:27 +0000 Subject: [PATCH 53/66] Added comments --- src/sonic-eventd/src/eventd.cpp | 6 ++++++ src/sonic-eventd/tests/eventd_ut.cpp | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 3c754fa7c65d..a379e824a930 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -160,6 +160,12 @@ capture_service::do_capture() cap_state_t cap_state = CAP_STATE_INIT; + /* + * Need subscription for publishers to publish. Start one. + * As we are reading off of capture socket, we don't read from + * this handle. Not reading is a not a concern, as zmq will cache + * few initial messages and rest it will drop. 
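+     * The handle is released via events_deinit_subscriber once capture stops.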
+ */ subs_handle = events_init_subscriber(); RET_ON_ERR(subs_handle != NULL, "failed to subscribe to all"); diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index 227d7a9b78e0..d6f07f23de0f 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -341,7 +341,7 @@ TEST(eventd, capture) /* Run subscriber; Else publisher will drop events on floor, with no subscriber. */ thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); - + /* Create capture service */ capture_service *pcap = new capture_service(zctx, cache_max); From dec3bc5a88317ea6e417c54095bd98f768189ac4 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 14 Jul 2022 17:54:37 +0000 Subject: [PATCH 54/66] Yang models are removed as they are being handled separately by Zain --- .../yang-events/sonic-events-bgp.yang | 97 --------- .../yang-events/sonic-events-common.yang | 46 ----- .../yang-events/sonic-events-dhcp_relay.yang | 79 -------- .../yang-events/sonic-events-host.yang | 188 ------------------ .../yang-events/sonic-events-pmon.yang | 42 ---- .../yang-events/sonic-events-swss.yang | 132 ------------ .../yang-events/sonic-events-syncd.yang | 60 ------ 7 files changed, 644 deletions(-) delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-bgp.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-common.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-host.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-pmon.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-swss.yang delete mode 100644 src/sonic-yang-models/yang-events/sonic-events-syncd.yang diff --git a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang b/src/sonic-yang-models/yang-events/sonic-events-bgp.yang deleted file mode 100644 index 6c275c754e4b..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-bgp.yang +++ /dev/null @@ -1,97 +0,0 @@ -module sonic-events-bgp { - namespace "http://github.com/Azure/sonic-events-bgp"; - - yang-version 1.1; - - import sonic-events-common { - prefix evtcmn; - } - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - revision 2022-12-01 { - description "BGP alert events."; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC BGP events"; - - container bgp-state { - oc-alarm-types:MINOR - - description " - Declares an event for BGP state for a neighbor IP going up/down.; - - leaf ip { - type inet:ip-address; - description "IP of neighbor"; - } - - leaf status { - type enumeration { - enum "up"; - enum "down"; - - } - description "Provides the status as up (true) or down (false)"; - } - - uses evtcmn:sonic-events-cmn; - } - - container notification { - oc-alarm-types:MAJOR - - description " - Reports an notification. - The error codes as per IANA. 
- The other params are as in the message"; - - leaf major-code { - type uint8; - description "Major IANA error code; [RFC4271][RFC7313]"; - } - - leaf minor-code { - type uint8; - description "Minor IANA error code; [RFC4271][RFC7313]"; - } - - leaf ip { - type inet:ip-address; - description "IP of neighbor associated with this notification"; - } - - leaf ASN { - type uint32; - description "ASN number from the notification"; - } - - leaf is-sent { - type boolean; - description "true - if this notification was for sent messages; false if it was for received."; - } - - uses evtcmn:sonic-events-cmn; - } - - container zebra-no-buff { - oc-alarm-types:MAJOR - - description " - Declares an event for zebra running out of buffer. - This event does not have any other parameter. - Hence source + tag identifies an event"; - - uses evtcmn:sonic-events-cmn; - } -} - diff --git a/src/sonic-yang-models/yang-events/sonic-events-common.yang b/src/sonic-yang-models/yang-events/sonic-events-common.yang deleted file mode 100644 index bbff8f87796e..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-common.yang +++ /dev/null @@ -1,46 +0,0 @@ -module sonic-events-common { - namespace "http://github.com/Azure/sonic-events-common"; - prefix evtcmn; - yang-version 1.1; - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC Events common definition"; - - revision 2022-12-01 { - description - "Common reusable definitions"; - } - - grouping sonic-events-cmn { - leaf timestamp { - type yang::date-and-time; - description "time of the event"; - } - } - - grouping sonic-events-usage { - leaf usage { - type uint8 { - range "0..100" { - error-message "Incorrect val for %"; - } - } - description "Percentage in use"; - } - - leaf limit { - type uint8 { - range "0..100" { - error-message "Incorrect val for %"; - } - } - description "Percentage limit set"; - } - } -} diff --git a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang b/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang deleted file mode 100644 index b4a07555a21a..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-dhcp_relay.yang +++ /dev/null @@ -1,79 +0,0 @@ -module sonic-events-dhcp-relay { - namespace "http://github.com/sonic-net/sonic-events-dhcp-relay"; - yang-version 1.1; - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - import sonic-events-common { - prefix evtcmn; - } - - revision 2022-12-01 { - description "dhcp-relay alert events."; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC dhcp-relay events"; - - container dhcp-relay-discard { - oc-alarm-types:MAJOR - - description " - Declares an event for dhcp-relay discarding packet on an - interface due to missing IP address assigned. - Params: - name of the interface discarding. - class of the missing IP address as IPv4 or IPv6."; - - leaf ip_class { - type enumeration { - enum "ipV4"; - enum "ipV6"; - } - description "Class of IP address missing"; - } - - leaf ifname { - type string; - description "Name of the i/f discarding"; - } - - type leafref { - path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; - } - - uses evtcmn:sonic-events-cmn; - } - - container dhcp-relay-disparity { - oc-alarm-types:MAJOR - - description " - Declares an event for disparity detected in - DHCP Relay behavior by dhcpmon. 
- parameters: - vlan that shows this disparity - The duration of disparity"; - - leaf vlan { - type string; - description "Name of the vlan affected"; - } - - leaf duration { - type uint32; - description "Duration of disparity"; - } - - uses evtcmn:sonic-events-cmn; - } -} - diff --git a/src/sonic-yang-models/yang-events/sonic-events-host.yang b/src/sonic-yang-models/yang-events/sonic-events-host.yang deleted file mode 100644 index 95da1106ee5c..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-host.yang +++ /dev/null @@ -1,188 +0,0 @@ -module events-host { - namespace "http://github.com/sonic-net/sonic-events-host"; - yang-version 1.1; - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - import sonic-events-common { - prefix evtcmn; - } - revision 2022-12-01 { - description "BGP alert events."; - } - - container disk-usage { - oc-alarm-types:MINOR - - description " - Declares an event for disk usage crossing set limit - The parameters describe the usage & limit set."; - - leaf fs { - type string; - description "Name of the file system"; - default ""; - } - - uses evtcmn:sonic-events-usage; - - uses evtcmn:sonic-events-cmn; - } - - container memory-usage { - oc-alarm-types:MINOR - - description " - Declares an event for memory usage crossing set limit - The parameters describe the usage & limit set."; - - uses evtcmn:sonic-events-usage; - - uses evtcmn:sonic-events-cmn; - } - - container event-sshd { - oc-alarm-types:MINOR - - description " - Declares an event reported by sshd. - The fail type declares the type of failure. - INCORRECT_PASSWORD - denotes that sshd is sending - wrong password to AAA to intentionally fail this login."; - - leaf fail_type { - type enumeration { - enum "INCORRECT_PASSWD"; - } - description "Type of failure"; - } - - uses evtcmn:sonic-events-cmn; - } - - container event-disk { - oc-alarm-types:MINOR - - description " - Declares an event reported by disk check. - The fail type declares the type of failure. - read-only - denotes that disk is in RO state."; - - leaf fail_type { - type enumeration { - enum "read_only"; - } - description "Type of failure"; - } - - uses evtcmn:sonic-events-cmn; - } - - container event-kernel { - oc-alarm-types:MINOR - - description " - Declares an event reported by kernel. - The fail type declares the type of failure."; - - leaf fail_type { - type enumeration { - enum "write_failed"; - enum "write_protected"; - enum "remount_read_only"; - enum "aufs_read_lock"; - enum "invalid_freelist"; - enum "zlib_decompress"; - } - description "Type of failure"; - } - - leaf msg { - type string; - description "human readable hint text"; - default ""; - } - - uses evtcmn:sonic-events-cmn; - } - - container event-monit-proc { - evtcmn:severity "2" - - description " - Declares an event reported by monit for a process - that is not running. - - Params: - Name of the process that is not running. - The ASIC-index of that process."; - - leaf proc_name { - type string; - description "Name of the process not running"; - default ""; - } - - leaf asic_index { - type uint8; - description "ASIC index in case of multi asic platform"; - default 0; - } - - uses evtcmn:sonic-events-cmn; - } - - container event-monit-status { - evtcmn:severity "2" - - description " - Declares an event reported by monit for status check - failure for a process - - Params: - Name of the process that is not running. 
- The ASIC-index of that process."; - - leaf entity { - type string; - description "Name of the failing entity"; - default ""; - } - - leaf asic_index { - type uint8; - description "ASIC index in case of multi asic platform"; - default 0; - } - - leaf reason { - type string; - description "Human readble text explaining failure"; - default ""; - } - - uses evtcmn:sonic-events-cmn; - } - - container event-platform { - evtcmn:severity "2" - - description " - Declares an event for platform related failure. - Params: - fail_type provides the type of failure."; - - leaf fail_type { - type enumeration { - enum "watchdog_timeout"; - enum "switch_parity_error"; - enum "SEU_error"; - } - description "Type of failure"; - } - - uses evtcmn:sonic-events-cmn; - } -} diff --git a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang b/src/sonic-yang-models/yang-events/sonic-events-pmon.yang deleted file mode 100644 index 6439eacaafc3..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-pmon.yang +++ /dev/null @@ -1,42 +0,0 @@ -module sonic-events-pmon { - namespace "http://github.com/sonic-net/sonic-events-pmon"; - yang-version 1.1; - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - import sonic-events-common { - prefix evtcmn; - } - - revision 2022-12-01 { - description "pmon alert events."; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC pmon events"; - - - container pmon-exited { - oc-alarm-types:MAJOR - - description " - Declares an event reportes by pmon for an unexpected exit. - The exited entity is the only param"; - - leaf entity { - type string; - description "entity that had unexpected exit"; - } - - uses evtcmn:sonic-events-cmn; - } -} - diff --git a/src/sonic-yang-models/yang-events/sonic-events-swss.yang b/src/sonic-yang-models/yang-events/sonic-events-swss.yang deleted file mode 100644 index 50ee7471b829..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-swss.yang +++ /dev/null @@ -1,132 +0,0 @@ -module sonic-events-swss { - namespace "http://github.com/sonic-net/sonic-events-swss"; - yang-version 1.1; - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - import sonic-events-common { - prefix evtcmn; - } - - revision 2022-12-01 { - description "SWSS alert events."; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC SWSS events"; - - container redis-generic { - oc-alarm-types:MAJOR - - description " - Declares an event for a fatal error encountered by swss. - The asic-index of the failing process is the only param."; - - leaf asic_index { - type uint8; - description "ASIC index in case of multi asic platform"; - default 0; - } - - uses evtcmn:sonic-events-cmn; - } - - container if-state { - oc-alarm-types:MINOR - - description " - Declares an event for i/f flap. - - The name of the flapping i/f and status are the only params."; - - leaf ifname { - type string; - description "Interface name"; - } - - type leafref { - path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; - } - - leaf status { - type enumeration { - enum "up"; - enum "down"; - } - description "Provides the status as up (true) or down (false)"; - } - - uses evtcmn:sonic-events-cmn; - - } - - container pfc-storm { - oc-alarm-types:MAJOR - - description " - Declares an event for PFC storm. 
- - The name of the i/f facing the storm is the only param."; - - leaf ifname { - type string; - description "Interface name"; - } - - type leafref { - path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; - } - - leaf queue_index { - type uint8; - } - - leaf queue_id { - type uint64_t; - } - - leaf port_id { - type uint64_t; - } - - leaf timestamp { - type yang::date-and-time; - description "time of the event"; - } - } - - container chk_crm_threshold { - oc-alarm-types:MAJOR - - description " - Declares an event for CRM threshold."; - - leaf percent { - type uint8 { - range "0..100" { - error-message "Invalid percentage value"; - } - } - description "percentage used"; - } - - leaf used_cnt { - type uint8; - } - - leaf free_cnt { - type uint64_t; - } - - uses evtcmn:sonic-events-cmn; - } -} - diff --git a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang b/src/sonic-yang-models/yang-events/sonic-events-syncd.yang deleted file mode 100644 index 8a8a62579c99..000000000000 --- a/src/sonic-yang-models/yang-events/sonic-events-syncd.yang +++ /dev/null @@ -1,60 +0,0 @@ -module sonic-events-syncd { - namespace "http://github.com/sonic-net/sonic-events-syncd"; - yang-version 1.1; - - import openconfig-alarm-types { - prefix oc-alarm-types; - } - - import sonic-events-common { - prefix evtcmn; - } - - revision 2022-12-01 { - description "syncd alert events."; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description - "SONIC syncd events"; - - container syncd_failure { - oc-alarm-types:MAJOR - - description " - Declares an event for all types of syncd failure. - The type of failure and the asic-index of failing syncd are - provided along with a human readable message to give the - dev debugging additional info."; - - leaf asic_index { - type uint8; - description "ASIC index in case of multi asic platform"; - default 0; - } - - leaf fail_type { - type enumeration { - enum "route_add_failed"; - enum "switch_event_2"; - enum "brcm_sai_switch_assert"; - enum "assert"; - enum "mmu_err"; - } - } - - leaf msg { - type string; - description "human readable hint text" - default ""; - } - - uses evtcmn:sonic-events-cmn; - } -} - From 2e1d8f954b482944ebb6bc2bec0192ada2f0630d Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 15 Jul 2022 13:39:32 +0000 Subject: [PATCH 55/66] Updated target; Added plugin dependency to image --- files/build_templates/eventd.service.j2 | 2 +- rules/docker-config-engine-bullseye.mk | 1 - slave.mk | 1 + src/sonic-eventd/src/eventd.cpp | 7 +++++++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/files/build_templates/eventd.service.j2 b/files/build_templates/eventd.service.j2 index 0ad7f52ee83d..1aaedefd1227 100644 --- a/files/build_templates/eventd.service.j2 +++ b/files/build_templates/eventd.service.j2 @@ -14,4 +14,4 @@ ExecStop=/usr/bin/{{docker_container_name}}.sh stop RestartSec=30 [Install] -WantedBy=sonic.target +WantedBy=multi-user.target diff --git a/rules/docker-config-engine-bullseye.mk b/rules/docker-config-engine-bullseye.mk index 727449ce2250..7e649b987c93 100644 --- a/rules/docker-config-engine-bullseye.mk +++ b/rules/docker-config-engine-bullseye.mk @@ -18,7 +18,6 @@ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_LOAD_DOCKERS += $(DOCKER_BASE_BULLSEYE) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(SWSS_VARS_TEMPLATE) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(RSYSLOG_PLUGIN_CONF_J2) -$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $($(DOCKER_EVENTD)_PLUGIN) 
$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $($(SONIC_CTRMGRD)_CONTAINER_SCRIPT) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS = $($(DOCKER_BASE_BULLSEYE)_DBG_DEPENDS) \ diff --git a/slave.mk b/slave.mk index 76274b21b39e..e884aa4416e4 100644 --- a/slave.mk +++ b/slave.mk @@ -1076,6 +1076,7 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_INSTALLERS)) : $(TARGET_PATH)/% : \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_YANG_MODELS_PY3)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_CTRMGRD)) \ $(addprefix $(FILES_PATH)/,$($(SONIC_CTRMGRD)_FILES)) \ + $(addprefix $(FILES_PATH)/,$($(DOCKER_EVENTD)_PLUGIN)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_YANG_MGMT_PY3)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SYSTEM_HEALTH)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_HOST_SERVICES_PY3)) diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index a379e824a930..9b164fbe38b3 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -228,6 +228,13 @@ capture_service::do_capture() switch(cap_state) { case CAP_STATE_INIT: + /* + * In this state check against cache, if duplicate + * When duplicate or new one seen, remove the entry from pre-exist map + * Stay in this state, until the pre-exist cache is empty or as many + * messages as in cache are seen, as in worst case even if you see + * duplicate of each, it will end with first m_events.size() + */ { bool add = true; init_cnt--; From b6484c2953c6f73119ebe2fc74c1b7e738ec057e Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Fri, 15 Jul 2022 17:44:11 +0000 Subject: [PATCH 56/66] tool update --- src/sonic-eventd/Makefile | 6 +++--- src/sonic-eventd/tools/events_tool.cpp | 22 +++++++++++++--------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index ed54ea170e7d..16aced1716bd 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -7,7 +7,7 @@ RSYSLOG-PLUGIN_TEST := rsyslog_plugin_tests/tests CP := cp MKDIR := mkdir CC := g++ -MV := mv +CP := cp LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid TEST_LIBS := -L/usr/src/gtest -lgtest -lgtest_main -lgmock -lgmock_main @@ -69,8 +69,8 @@ rsyslog-plugin-tests: $(RSYSLOG-PLUGIN-TEST_OBJS) install: $(MKDIR) -p $(DESTDIR)/usr/sbin - $(MV) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin - $(MV) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin deinstall: $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 4e42591d4596..8da0a8bf396e 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -61,6 +61,7 @@ Note:\n\ ]\n\ Default: \n\ \n\ +-c - Use offline cache in receive mode\n\ -o - O/p file to write received events\n\ Default: STDOUT\n"; @@ -86,7 +87,7 @@ t_map_to_str(const Map &m) } void -do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt=0, int pause=0) +do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt, int pause, bool use_cache) { int index=0, total_missed = 0; ostream* fp = &cout; @@ -99,7 +100,9 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt printf("outfile=%s set\n", outfile.c_str()); } } - event_handle_t h = events_init_subscriber(false, 100, filter.empty() ? 
NULL : &filter); + event_handle_t h = events_init_subscriber(use_cache, 2000, filter.empty() ? NULL : &filter); + printf("Subscribed with use_cache=%d timeout=2000 filter %s\n", + use_cache, filter.empty() ? "empty" : "non-empty"); ASSERT(h != NULL, "Failed to get subscriber handle"); while(!term_receive) { @@ -130,10 +133,6 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt break; } } - if (pause) { - /* Pause between two sends */ - this_thread::sleep_for(chrono::milliseconds(pause)); - } } events_deinit_subscriber(h); @@ -277,6 +276,7 @@ void usage() int main(int argc, char **argv) { + bool use_cache = false; int op = OP_INIT; int cnt=0, pause=0; string json_str_msg, outfile("STDOUT"), infile; @@ -284,8 +284,12 @@ int main(int argc, char **argv) for(;;) { - switch(getopt(argc, argv, "srn:p:i:o:f:")) // note the colon (:) to indicate that 'b' has a parameter and is not a switch + switch(getopt(argc, argv, "srn:p:i:o:f:c")) // note the colon (:) to indicate that 'b' has a parameter and is not a switch { + case 'c': + use_cache = true; + continue; + case 's': op |= OP_SEND; continue; @@ -339,14 +343,14 @@ int main(int argc, char **argv) op, cnt, pause, infile.c_str(), outfile.c_str()); if (op == OP_SEND_RECV) { - thread thr(&do_receive, filter, outfile, 0, 0); + thread thr(&do_receive, filter, outfile, 0, 0, use_cache); do_send(infile, cnt, pause); } else if (op == OP_SEND) { do_send(infile, cnt, pause); } else if (op == OP_RECV) { - do_receive(filter, outfile, cnt, pause); + do_receive(filter, outfile, cnt, pause, use_cache); } else { ASSERT(false, "Elect -s for send or -r receive or both; Bailing out with no action\n"); From 13b8271c67b77799b339318378c0f52069477dc6 Mon Sep 17 00:00:00 2001 From: zain Date: Wed, 20 Jul 2022 22:21:02 +0000 Subject: [PATCH 57/66] Add lua code and formatter for rsyslog plugin --- src/sonic-eventd/Makefile | 4 +- .../rsyslog_plugin/rsyslog_plugin.cpp | 30 +++- .../rsyslog_plugin/rsyslog_plugin.h | 10 +- src/sonic-eventd/rsyslog_plugin/subdir.mk | 6 +- .../rsyslog_plugin/syslog_parser.cpp | 34 +++- .../rsyslog_plugin/syslog_parser.h | 9 +- .../rsyslog_plugin/timestamp_formatter.cpp | 78 +++++++++ .../rsyslog_plugin/timestamp_formatter.h | 28 ++++ .../rsyslog_plugin_ut.cpp | 156 ++++++++++++++++-- .../rsyslog_plugin_tests/subdir.mk | 8 +- .../rsyslog_plugin_tests/test_regex_1.rc.json | 1 - .../rsyslog_plugin_tests/test_regex_2.rc.json | 4 +- .../rsyslog_plugin_tests/test_regex_4.rc.json | 2 +- 13 files changed, 328 insertions(+), 42 deletions(-) create mode 100644 src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index ed54ea170e7d..d09743437929 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -8,7 +8,7 @@ CP := cp MKDIR := mkdir CC := g++ MV := mv -LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid +LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid -llua5.1 TEST_LIBS := -L/usr/src/gtest -lgtest -lgtest_main -lgmock -lgmock_main CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common @@ -26,7 +26,7 @@ endif -include rsyslog_plugin/subdir.mk -include rsyslog_plugin_tests/subdir.mk -all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin +all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin rsyslog-plugin-tests 
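
The -llua5.1 added to LIBS above backs the new parser hook introduced in this patch: a regex "params" entry may carry inline Lua after a colon (for example "is-sent:ret=tostring(arg==\"sent\")" in the tests further down), the captured group is exposed to that chunk as the global "arg", and the transformed value is read back from the global "ret". The following is only a minimal, self-contained sketch of that embedding pattern, assuming the stock Lua 5.1 C API and the Debian lua5.1 header layout; applyLua is an illustrative helper name and is not code from this patch.

extern "C" {
    #include <lua5.1/lua.h>      /* header paths are an assumption (Debian liblua5.1 dev package) */
    #include <lua5.1/lualib.h>
    #include <lua5.1/lauxlib.h>
}
#include <iostream>
#include <string>

/* Illustrative helper (not part of the patch): run a "ret=..." chunk against one capture. */
static std::string applyLua(lua_State* L, const std::string& code, const std::string& capture)
{
    lua_pushstring(L, capture.c_str());      /* expose the capture as the global 'arg' */
    lua_setglobal(L, "arg");
    if (luaL_dostring(L, code.c_str()) != 0) {
        lua_pop(L, 1);                       /* pop the error message */
        return capture;                      /* bad chunk: fall back to the raw capture */
    }
    lua_getglobal(L, "ret");                 /* the chunk is expected to set 'ret' */
    const char* ret = lua_tostring(L, -1);
    std::string out = (ret != nullptr) ? ret : capture;
    lua_pop(L, 1);
    return out;
}

int main()
{
    lua_State* L = luaL_newstate();
    luaL_openlibs(L);
    /* Prints "true": mirrors the is-sent transformation exercised by the unit tests below. */
    std::cout << applyLua(L, "ret=tostring(arg==\"sent\")", "sent") << std::endl;
    lua_close(L);
    return 0;
}

The plugin wires the same calls into parseMessage(), falling back to the raw captured value whenever the chunk is missing or fails to run.
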
sonic-eventd: $(OBJS) @echo 'Building target: $@' diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index 854570c57b7e..3629602fdb43 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -2,20 +2,28 @@ #include #include #include +#include +#include #include "rsyslog_plugin.h" #include "json.hpp" using json = nlohmann::json; -bool RsyslogPlugin::onMessage(string msg) { +bool RsyslogPlugin::onMessage(string msg, lua_State* luaState) { string tag; event_params_t paramDict; - if(!m_parser->parseMessage(msg, tag, paramDict)) { + if(!m_parser->parseMessage(msg, tag, paramDict, luaState)) { SWSS_LOG_DEBUG("%s was not able to be parsed into a structured event\n", msg.c_str()); return false; } else { + string timestamp = paramDict["timestamp"]; + string formattedTimestamp = m_timestampFormatter->changeTimestampFormat(paramDict["timestamp"]); + if(timestamp.empty()) { + SWSS_LOG_ERROR("Timestamp Formatter was unable to format %s.\n", timestamp.c_str()); + } + paramDict["timestamp"] = formattedTimestamp; int returnCode = event_publish(m_eventHandle, tag, ¶mDict); - if (returnCode != 0) { + if (returnCode != 0 || timestamp.empty()) { SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s.\n", tag.c_str()); return false; } @@ -37,14 +45,17 @@ bool RsyslogPlugin::createRegexList() { return false; } - string regexString; regex expression; for(long unsigned int i = 0; i < m_parser->m_regexList.size(); i++) { + string regexString = "([a-zA-Z]{3} [0-9]{1,2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}) "; try { - regexString = m_parser->m_regexList[i]["regex"]; + string givenRegex = m_parser->m_regexList[i]["regex"]; + regexString += givenRegex; string tag = m_parser->m_regexList[i]["tag"]; vector params = m_parser->m_regexList[i]["params"]; + params.insert(params.begin(), "timestamp"); // each event will have timestamp so inserting it + m_parser->m_regexList[i]["params"] = params; regex expr(regexString); expression = expr; } catch (domain_error& deException) { @@ -56,23 +67,28 @@ bool RsyslogPlugin::createRegexList() { } m_parser->m_expressions.push_back(expression); } + if(m_parser->m_expressions.empty()) { SWSS_LOG_ERROR("Empty list of regex expressions.\n"); return false; } + regexFile.close(); return true; } void RsyslogPlugin::run() { + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); while(true) { string line; getline(cin, line); if(line.empty()) { continue; } - onMessage(line); + onMessage(line, luaState); } + lua_close(luaState); } int RsyslogPlugin::onInit() { @@ -87,7 +103,9 @@ int RsyslogPlugin::onInit() { } RsyslogPlugin::RsyslogPlugin(string moduleName, string regexPath) { + const string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; m_parser = unique_ptr(new SyslogParser()); + m_timestampFormatter = unique_ptr(new TimestampFormatter(timestampFormatRegex)); m_moduleName = moduleName; m_regexPath = regexPath; } diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index 7a1082998c74..9d307a78a082 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -1,9 +1,16 @@ #ifndef RSYSLOG_PLUGIN_H #define RSYSLOG_PLUGIN_H +extern "C" +{ + #include + #include + #include +} #include #include #include "syslog_parser.h" +#include "timestamp_formatter.h" #include "events.h" #include 
"logger.h" @@ -19,11 +26,12 @@ using namespace swss; class RsyslogPlugin { public: int onInit(); - bool onMessage(string msg); + bool onMessage(string msg, lua_State* luaState); void run(); RsyslogPlugin(string moduleName, string regexPath); private: unique_ptr m_parser; + unique_ptr m_timestampFormatter; event_handle_t m_eventHandle; string m_regexPath; string m_moduleName; diff --git a/src/sonic-eventd/rsyslog_plugin/subdir.mk b/src/sonic-eventd/rsyslog_plugin/subdir.mk index 7e38de4b902f..17df55c718a0 100644 --- a/src/sonic-eventd/rsyslog_plugin/subdir.mk +++ b/src/sonic-eventd/rsyslog_plugin/subdir.mk @@ -1,9 +1,9 @@ CC := g++ -RSYSLOG-PLUGIN_TEST_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o -RSYSLOG-PLUGIN_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/main.o +RSYSLOG-PLUGIN-TEST_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/timestamp_formatter.o +RSYSLOG-PLUGIN_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/timestamp_formatter.o ./rsyslog_plugin/main.o -C_DEPS += ./rsyslog_plugin/rsyslog_plugin.d ./rsyslog_plugin/syslog_parser.d ./rsyslog_plugin/main.d +C_DEPS += ./rsyslog_plugin/rsyslog_plugin.d ./rsyslog_plugin/syslog_parser.d ./rsyslog_plugin/timestamp_formatter.d ./rsyslog_plugin/main.d rsyslog_plugin/%.o: rsyslog_plugin/%.cpp @echo 'Building file: $<' diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index 0ab1f7c9b103..4baa9b6b4dc0 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -10,7 +10,7 @@ * */ -bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap) { +bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap, lua_State* luaState) { for(long unsigned int i = 0; i < m_regexList.size(); i++) { smatch matchResults; vector params = m_regexList[i]["params"]; @@ -19,9 +19,35 @@ bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t } // found matching regex eventTag = m_regexList[i]["tag"]; - transform(params.begin(), params.end(), matchResults.begin() + 1, inserter(paramMap, paramMap.end()), [](string a, string b) { - return make_pair(a,b); - }); + // check params for lua code + for(long unsigned int j = 0; j < params.size(); j++) { + auto delimPos = params[j].find(':'); + string resultValue = matchResults[j + 1].str(); + if(delimPos != string::npos) { // have to execute lua script + string param = params[j].substr(0, delimPos); + string luaString = params[j].substr(delimPos + 1); + if(luaString.empty()) { // empty lua code + SWSS_LOG_INFO("Lua code missing after :, skipping operation"); + paramMap[param] = resultValue; + continue; + } + const char* luaCode = luaString.c_str(); + lua_pushstring(luaState, resultValue.c_str()); + lua_setglobal(luaState, "arg"); + if(luaL_dostring(luaState, luaCode) == 0) { + lua_pop(luaState, lua_gettop(luaState)); + } else { + SWSS_LOG_ERROR("Invalid lua code, unable to do operation.\n"); + paramMap[param] = resultValue; + continue; + } + lua_getglobal(luaState, "ret"); + paramMap[param] = lua_tostring(luaState, -1); + lua_pop(luaState, 1); + } else { + paramMap[params[j]] = resultValue; + } + } return true; } return false; diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index 
e0e9252aaa63..2062a06e32e5 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -1,6 +1,13 @@ #ifndef SYSLOG_PARSER_H #define SYSLOG_PARSER_H +extern "C" +{ + #include + #include + #include +} + #include #include #include @@ -20,7 +27,7 @@ class SyslogParser { public: vector m_expressions; json m_regexList = json::array(); - bool parseMessage(string message, string& tag, event_params_t& paramDict); + bool parseMessage(string message, string& tag, event_params_t& paramDict, lua_State* luaState); }; #endif diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp new file mode 100644 index 000000000000..21581471f177 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp @@ -0,0 +1,78 @@ +#include +#include "timestamp_formatter.h" +#include "logger.h" + +using namespace std; + +/*** + * + * Formats given string into string needed by YANG model + * + * @param timestamp parsed from syslog message + * @return formatted timestamp that conforms to YANG model + * + */ + +const unordered_map g_monthDict { + { "Jan", "01" }, + { "Feb", "02" }, + { "Mar", "03" }, + { "Apr", "04" }, + { "May", "05" }, + { "Jun", "06" }, + { "Jul", "07" }, + { "Aug", "08" }, + { "Sep", "09" }, + { "Oct", "10" }, + { "Nov", "11" }, + { "Dec", "12" } +}; + +string TimestampFormatter::getYear(string timestamp) { + if(!m_storedTimestamp.empty()) { + if(m_storedTimestamp.compare(timestamp) <= 0) { + m_storedTimestamp = timestamp; + return m_storedYear; + } + } + // no last timestamp or year change + time_t currentTime = time(nullptr); + tm* const localTime = localtime(¤tTime); + stringstream ss; + auto currentYear = 1900 + localTime->tm_year; + ss << currentYear; // get current year + string year = ss.str(); + m_storedTimestamp = timestamp; + m_storedYear = year; + return year; +} + +string TimestampFormatter::changeTimestampFormat(string timestamp) { + smatch dateComponents; + string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ + if(!regex_search(timestamp, dateComponents, m_expression) || dateComponents.size() != 4) { + SWSS_LOG_ERROR("Timestamp unable to be broken down into components.\n"); + return ""; // empty string is error + } + + string month; + auto it = g_monthDict.find(dateComponents[1].str()); + if(it != g_monthDict.end()) { + month = it->second; + } else { + SWSS_LOG_ERROR("Timestamp month was given in wrong format.\n"); + return ""; + } + + string day = dateComponents[2].str(); + if(day.size() == 1) { // convert 1 -> 01 + day.insert(day.begin(), '0'); + } + + string time = dateComponents[3].str(); + string currentTimestamp = month + day + time; + string year = getYear(currentTimestamp); + + formattedTimestamp = year + "-" + month + "-" + day + "T" + time + "Z"; + return formattedTimestamp; +} diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h new file mode 100644 index 000000000000..95b8e7b15e17 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h @@ -0,0 +1,28 @@ +#ifndef TIMESTAMP_FORMATTER_H +#define TIMESTAMP_FORMATTER_H + +#include +#include +#include + +/*** + * + * TimestampFormatter is responsible for formatting the timestamps received in syslog messages and to format them into the type needed by YANG model + * + */ + +class TimestampFormatter { +public: + std::string changeTimestampFormat(std::string 
timestamp); + TimestampFormatter(std::string regexFormatString) { + std::regex expr(regexFormatString); + m_expression = expr; + } + std::string m_storedTimestamp; + std::string m_storedYear; +private: + std::regex m_expression; + std::string getYear(std::string timestamp); +}; + +#endif diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 5893c70f66c0..356a2d015441 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -1,16 +1,25 @@ +extern "C" +{ + #include + #include + #include +} #include #include #include #include "gtest/gtest.h" -#include "common/json.hpp" -#include "common/events.h" -#include "rsyslog_plugin/rsyslog_plugin.h" -#include "rsyslog_plugin/syslog_parser.h" +#include "json.hpp" +#include "events.h" +#include "../rsyslog_plugin/rsyslog_plugin.h" +#include "../rsyslog_plugin/syslog_parser.h" +#include "../rsyslog_plugin/timestamp_formatter.h" using namespace std; +using namespace swss; using json = nlohmann::json; TEST(syslog_parser, matching_regex) { + json jList = json::array(); vector testExpressions; string regexString = "timestamp (.*) message (.*) other_data (.*)"; @@ -33,12 +42,15 @@ TEST(syslog_parser, matching_regex) { SyslogParser* parser = new SyslogParser(); parser->m_expressions = testExpressions; parser->m_regexList = jList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); - bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, paramDict); + bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, paramDict, luaState); EXPECT_EQ(true, success); EXPECT_EQ("test_tag", tag); EXPECT_EQ(expectedDict, paramDict); - + + lua_close(luaState); delete parser; } @@ -60,70 +72,180 @@ TEST(syslog_parser, no_matching_regex) { SyslogParser* parser = new SyslogParser(); parser->m_expressions = testExpressions; parser->m_regexList = jList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); - bool success = parser->parseMessage("Test Message", tag, paramDict); + bool success = parser->parseMessage("Test Message", tag, paramDict, luaState); EXPECT_EQ(false, success); + + lua_close(luaState); + delete parser; +} + +TEST(syslog_parser, lua_code_valid_1) { + json jList = json::array(); + vector testExpressions; + string regexString = ".* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + json jTest; + jTest["tag"] = "test_tag"; + jTest["regex"] = regexString; + jTest["params"] = { "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; + jList.push_back(jTest); + regex expression(regexString); + testExpressions.push_back(expression); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["is-sent"] = "true"; + expectedDict["ip"] = "100.95.147.229"; + expectedDict["major-code"] = "2"; + expectedDict["minor-code"] = "2"; + + SyslogParser* parser = new SyslogParser(); + parser->m_expressions = testExpressions; + parser->m_regexList = jList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("NOTIFICATION: sent to neighbor 100.95.147.229 active 2/2 (peer in wrong AS) 2 bytes", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, 
paramDict); + + lua_close(luaState); delete parser; } +TEST(syslog_parser, lua_code_valid_2) { + json jList = json::array(); + vector testExpressions; + string regexString = ".* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + json jTest; + jTest["tag"] = "test_tag"; + jTest["regex"] = regexString; + jTest["params"] = { "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; + jList.push_back(jTest); + regex expression(regexString); + testExpressions.push_back(expression); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["is-sent"] = "false"; + expectedDict["ip"] = "10.10.24.216"; + expectedDict["major-code"] = "6"; + expectedDict["minor-code"] = "2"; + + SyslogParser* parser = new SyslogParser(); + parser->m_expressions = testExpressions; + parser->m_regexList = jList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("NOTIFICATION: received from neighbor 10.10.24.216 active 6/2 (Administrative Shutdown) 0 bytes", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); + delete parser; + +} RsyslogPlugin* createPlugin(string path) { RsyslogPlugin* plugin = new RsyslogPlugin("test_mod_name", path); return plugin; } +TimestampFormatter* createFormatter() { + const string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; + TimestampFormatter* formatter = new TimestampFormatter(timestampFormatRegex); + return formatter; +} + TEST(rsyslog_plugin, onInit_invalidJS0N) { - auto plugin = createPlugin("./test_regex_1.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_1.rc.json"); EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_emptyJSON) { - auto plugin = createPlugin("./test_regex_6.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_6.rc.json"); EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_missingRegex) { - auto plugin = createPlugin("./test_regex_3.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_3.rc.json"); EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onInit_invalidRegex) { - auto plugin = createPlugin("./test_regex_4.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_4.rc.json"); EXPECT_NE(0, plugin->onInit()); delete plugin; } TEST(rsyslog_plugin, onMessage) { - auto plugin = createPlugin("./test_regex_2.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_2.rc.json"); EXPECT_EQ(0, plugin->onInit()); - ifstream infile("test_syslogs.txt"); + ifstream infile("./rsyslog_plugin_tests/test_syslogs.txt"); string logMessage; bool parseResult; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); while(infile >> logMessage >> parseResult) { - EXPECT_EQ(parseResult, plugin->onMessage(logMessage)); + EXPECT_EQ(parseResult, plugin->onMessage(logMessage, luaState)); } + lua_close(luaState); infile.close(); delete plugin; } TEST(rsyslog_plugin, onMessage_noParams) { - auto plugin = createPlugin("./test_regex_5.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_5.rc.json"); EXPECT_EQ(0, plugin->onInit()); - ifstream infile("test_syslogs_2.txt"); + ifstream infile("./rsyslog_plugin_tests/test_syslogs_2.txt"); string logMessage; bool parseResult; + 
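+    /* Note (added for this write-up, not in the original patch): the loop below reads
+     * a message token and its expected result from test_syslogs.txt and replays each
+     * message through onMessage() with a single shared Lua state. */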
lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); while(infile >> logMessage >> parseResult) { - EXPECT_EQ(parseResult, plugin->onMessage(logMessage)); + EXPECT_EQ(parseResult, plugin->onMessage(logMessage, luaState)); } + lua_close(luaState); infile.close(); delete plugin; } +TEST(timestampFormatter, changeTimestampFormat) { + auto formatter = createFormatter(); + + string timestampOne = "Jul 20 10:09:40.230874"; + string timestampTwo = "Jan 1 00:00:00.000000"; + string timestampThree = "Dec 31 23:59:59.000000"; + + string formattedTimestampOne = formatter->changeTimestampFormat(timestampOne); + EXPECT_EQ("2022-07-20T10:09:40.230874Z", formattedTimestampOne); + + EXPECT_EQ("072010:09:40.230874", formatter->m_storedTimestamp); + + string formattedTimestampTwo = formatter->changeTimestampFormat(timestampTwo); + EXPECT_EQ("2022-01-01T00:00:00.000000Z", formattedTimestampTwo); + + formatter->m_storedTimestamp = "010100:00:00.000000"; + formatter->m_storedYear = "2025"; + + string formattedTimestampThree = formatter->changeTimestampFormat(timestampThree); + EXPECT_EQ("2025-12-31T23:59:59.000000Z", formattedTimestampThree); +} + int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk index dd2e47364a2b..6be7ef09786a 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk +++ b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk @@ -1,12 +1,12 @@ CC := g++ -RSYSLOG-PLUGIN_TEST_OBJS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.o +RSYSLOG-PLUGIN-TEST_OBJS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.o C_DEPS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.d rsyslog_plugin_tests/%.o: rsyslog_plugin_tests/%.cpp @echo 'Building file: $<' @echo 'Invoking: GCC C++ Compiler' - $(CC) -D__FILENAME__="$(subst rsyslog_plugin_tests/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$(@)" "$<" - @echo 'Finished building: $< - '@echo ' ' + $(CC) -D__FILENAME__="$(subst rsyslog_plugin_tests/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json index 72e8ffc0db8a..e69de29bb2d1 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json @@ -1 +0,0 @@ -* diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json index 5c9a2e9612ed..66788d326331 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json @@ -1,7 +1,7 @@ [ { "tag": "bgp-state", - "regex": "([a-zA-Z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}) .* %ADJCHANGE: neighbor (.*) (Up|Down) .*", - "params": [ "timestamp", "neighbor_ip", "state" ] + "regex": ".* %ADJCHANGE: neighbor (.*) (Up|Down) .*", + "params": ["neighbor_ip", "state" ] } ] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json index 244b601fbac5..c3a875aded0f 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json @@ -1,7 +1,7 @@ [ { "tag": "TEST-TAG-INVALID-REGEX", - "regex": "++", + "regex": "+++ ++++(", 
"params": [] } ] From 540af0fb7571027e44f130dee79cb92177588ec0 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 21 Jul 2022 00:08:57 +0000 Subject: [PATCH 58/66] Remove unneccessary lines --- src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp | 2 +- .../rsyslog_plugin_tests/rsyslog_plugin_ut.cpp | 11 ++--------- .../rsyslog_plugin_tests/test_regex_6.rc.json | 0 3 files changed, 3 insertions(+), 10 deletions(-) delete mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index 3629602fdb43..f4c79ba44775 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -23,7 +23,7 @@ bool RsyslogPlugin::onMessage(string msg, lua_State* luaState) { } paramDict["timestamp"] = formattedTimestamp; int returnCode = event_publish(m_eventHandle, tag, ¶mDict); - if (returnCode != 0 || timestamp.empty()) { + if(returnCode != 0) { SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s.\n", tag.c_str()); return false; } diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 356a2d015441..4d0363ed3834 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -18,8 +18,7 @@ using namespace std; using namespace swss; using json = nlohmann::json; -TEST(syslog_parser, matching_regex) { - +TEST(syslog_parser, matching_regex) { json jList = json::array(); vector testExpressions; string regexString = "timestamp (.*) message (.*) other_data (.*)"; @@ -166,14 +165,8 @@ TimestampFormatter* createFormatter() { return formatter; } -TEST(rsyslog_plugin, onInit_invalidJS0N) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_1.rc.json"); - EXPECT_NE(0, plugin->onInit()); - delete plugin; -} - TEST(rsyslog_plugin, onInit_emptyJSON) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_6.rc.json"); + auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_1.rc.json"); EXPECT_NE(0, plugin->onInit()); delete plugin; } diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_6.rc.json deleted file mode 100644 index e69de29bb2d1..000000000000 From 8d8103e779ae838a9b284746717cfb8675b0ef6f Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Thu, 21 Jul 2022 02:22:59 +0000 Subject: [PATCH 59/66] Updates per review comments; No logical code changes --- src/sonic-eventd/src/main.cpp | 1 + src/sonic-eventd/tools/events_tool.cpp | 35 -------------------------- 2 files changed, 1 insertion(+), 35 deletions(-) diff --git a/src/sonic-eventd/src/main.cpp b/src/sonic-eventd/src/main.cpp index fd14e64bc81e..ea058d5fac14 100644 --- a/src/sonic-eventd/src/main.cpp +++ b/src/sonic-eventd/src/main.cpp @@ -1,4 +1,5 @@ #include "logger.h" +#include "eventd.h" void run_eventd_service(); diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp index 8da0a8bf396e..68cf39dd2f25 100644 --- a/src/sonic-eventd/tools/events_tool.cpp +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -141,33 +141,6 @@ do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt } -#if 0 -void -parse_file(const string infile) -{ - ifstream input(infile); - string line; - - while(getline( input, line )) { - const auto &data = 
nlohmann::json::parse(line); - printf("is_obj=%d is_arr=%d\n", data.is_object(), data.is_array()); - printf("size=%d\n", (int)data.size()); - string k(data.begin().key()); - printf("key=%s\n", k.c_str()); - const auto &val = data.begin().value(); - printf("val: is_obj=%d is_arr=%d\n", val.is_object(), val.is_array()); - printf("Params:\n"); - for(auto par_it = val.begin(); par_it != val.end(); par_it++) { - string k(par_it.key()); - string v(par_it.value()); - printf(" key=%s\n",k.c_str()); - printf(" val=%s\n", v.c_str()); - } - printf("------------------------------\n"); - } -} -#endif - int do_send(const string infile, int cnt, int pause) { @@ -216,14 +189,6 @@ do_send(const string infile, int cnt, int pause) } } -#if 0 - cout << "Events to send\n"; - for(lst_t::const_iterator itc=lst.begin(); itc != lst.end(); ++itc) { - cout << "tag:" << itc->tag << " params:" << t_map_to_str(itc->params) << "\n"; - } - cout << "Events END\n"; -#endif - if (lst.empty()) { evt_t evt = { "test-tag", From b8300f3a75571b0c524e80a32eb896964d963807 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 22 Jul 2022 00:06:46 +0000 Subject: [PATCH 60/66] Add changes from review --- .../rsyslog_plugin/rsyslog_plugin.cpp | 15 +--- .../rsyslog_plugin/rsyslog_plugin.h | 2 - .../rsyslog_plugin/syslog_parser.cpp | 63 +++++++++----- .../rsyslog_plugin/syslog_parser.h | 4 + .../rsyslog_plugin/timestamp_formatter.cpp | 14 +-- .../rsyslog_plugin/timestamp_formatter.h | 9 +- .../rsyslog_plugin_ut.cpp | 87 +++++++++++-------- 7 files changed, 107 insertions(+), 87 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index f4c79ba44775..85604424b7f5 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -16,12 +16,6 @@ bool RsyslogPlugin::onMessage(string msg, lua_State* luaState) { SWSS_LOG_DEBUG("%s was not able to be parsed into a structured event\n", msg.c_str()); return false; } else { - string timestamp = paramDict["timestamp"]; - string formattedTimestamp = m_timestampFormatter->changeTimestampFormat(paramDict["timestamp"]); - if(timestamp.empty()) { - SWSS_LOG_ERROR("Timestamp Formatter was unable to format %s.\n", timestamp.c_str()); - } - paramDict["timestamp"] = formattedTimestamp; int returnCode = event_publish(m_eventHandle, tag, ¶mDict); if(returnCode != 0) { SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s.\n", tag.c_str()); @@ -45,17 +39,14 @@ bool RsyslogPlugin::createRegexList() { return false; } + string regexString; regex expression; for(long unsigned int i = 0; i < m_parser->m_regexList.size(); i++) { - string regexString = "([a-zA-Z]{3} [0-9]{1,2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}) "; try { - string givenRegex = m_parser->m_regexList[i]["regex"]; - regexString += givenRegex; + regexString = m_parser->m_regexList[i]["regex"]; string tag = m_parser->m_regexList[i]["tag"]; vector params = m_parser->m_regexList[i]["params"]; - params.insert(params.begin(), "timestamp"); // each event will have timestamp so inserting it - m_parser->m_regexList[i]["params"] = params; regex expr(regexString); expression = expr; } catch (domain_error& deException) { @@ -103,9 +94,7 @@ int RsyslogPlugin::onInit() { } RsyslogPlugin::RsyslogPlugin(string moduleName, string regexPath) { - const string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; m_parser = unique_ptr(new SyslogParser()); - m_timestampFormatter = 
unique_ptr(new TimestampFormatter(timestampFormatRegex)); m_moduleName = moduleName; m_regexPath = regexPath; } diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h index 9d307a78a082..0811b5f3032f 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -10,7 +10,6 @@ extern "C" #include #include #include "syslog_parser.h" -#include "timestamp_formatter.h" #include "events.h" #include "logger.h" @@ -31,7 +30,6 @@ class RsyslogPlugin { RsyslogPlugin(string moduleName, string regexPath); private: unique_ptr m_parser; - unique_ptr m_timestampFormatter; event_handle_t m_eventHandle; string m_regexPath; string m_moduleName; diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index 4baa9b6b4dc0..f82e0dad0dab 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -1,4 +1,5 @@ #include +#include #include "syslog_parser.h" #include "logger.h" @@ -10,6 +11,15 @@ * */ +void SyslogParser::addTimestamp(string message, event_params_t& paramMap) { + string formattedTimestamp = m_timestampFormatter->changeTimestampFormat(message); + if(formattedTimestamp.empty()) { + SWSS_LOG_ERROR("Message does not contain valid timestamp and cannot be formatted: %s.\n", message.c_str()); + return; + } + paramMap["timestamp"] = formattedTimestamp; +} + bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap, lua_State* luaState) { for(long unsigned int i = 0; i < m_regexList.size(); i++) { smatch matchResults; @@ -23,32 +33,39 @@ bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t for(long unsigned int j = 0; j < params.size(); j++) { auto delimPos = params[j].find(':'); string resultValue = matchResults[j + 1].str(); - if(delimPos != string::npos) { // have to execute lua script - string param = params[j].substr(0, delimPos); - string luaString = params[j].substr(delimPos + 1); - if(luaString.empty()) { // empty lua code - SWSS_LOG_INFO("Lua code missing after :, skipping operation"); - paramMap[param] = resultValue; - continue; - } - const char* luaCode = luaString.c_str(); - lua_pushstring(luaState, resultValue.c_str()); - lua_setglobal(luaState, "arg"); - if(luaL_dostring(luaState, luaCode) == 0) { - lua_pop(luaState, lua_gettop(luaState)); - } else { - SWSS_LOG_ERROR("Invalid lua code, unable to do operation.\n"); - paramMap[param] = resultValue; - continue; - } - lua_getglobal(luaState, "ret"); - paramMap[param] = lua_tostring(luaState, -1); - lua_pop(luaState, 1); - } else { - paramMap[params[j]] = resultValue; + if(delimPos == string::npos) { // no lua code + paramMap[params[j]] = resultValue; + continue; } + // have to execute lua script + string param = params[j].substr(0, delimPos); + string luaString = params[j].substr(delimPos + 1); + if(luaString.empty()) { // empty lua code + SWSS_LOG_INFO("Lua code missing after :, skipping operation"); + paramMap[param] = resultValue; + continue; + } + const char* luaCode = luaString.c_str(); + lua_pushstring(luaState, resultValue.c_str()); + lua_setglobal(luaState, "arg"); + if(luaL_dostring(luaState, luaCode) == 0) { + lua_pop(luaState, lua_gettop(luaState)); + } else { + SWSS_LOG_ERROR("Invalid lua code, unable to do operation.\n"); + paramMap[param] = resultValue; + continue; + } + lua_getglobal(luaState, "ret"); + paramMap[param] = lua_tostring(luaState, 
-1); + lua_pop(luaState, 1); } + addTimestamp(message, paramMap); return true; } return false; } + +SyslogParser::SyslogParser() { + string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}).*"; + m_timestampFormatter = unique_ptr(new TimestampFormatter(timestampFormatRegex)); +} diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index 2062a06e32e5..9cb28126188e 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -13,6 +13,7 @@ extern "C" #include #include "json.hpp" #include "events.h" +#include "timestamp_formatter.h" using namespace std; using json = nlohmann::json; @@ -25,9 +26,12 @@ using json = nlohmann::json; class SyslogParser { public: + unique_ptr m_timestampFormatter; vector m_expressions; json m_regexList = json::array(); + void addTimestamp(string message, event_params_t& paramDict); bool parseMessage(string message, string& tag, event_params_t& paramDict, lua_State* luaState); + SyslogParser(); }; #endif diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp index 21581471f177..ef428ac0b808 100644 --- a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp @@ -47,14 +47,13 @@ string TimestampFormatter::getYear(string timestamp) { return year; } -string TimestampFormatter::changeTimestampFormat(string timestamp) { +string TimestampFormatter::changeTimestampFormat(string message) { smatch dateComponents; string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ - if(!regex_search(timestamp, dateComponents, m_expression) || dateComponents.size() != 4) { - SWSS_LOG_ERROR("Timestamp unable to be broken down into components.\n"); + if(!regex_search(message, dateComponents, m_expression) || dateComponents.size() != 4) { //whole,month,day,time + printf("Timestamp unable to be broken down into components.\n"); return ""; // empty string is error } - string month; auto it = g_monthDict.find(dateComponents[1].str()); if(it != g_monthDict.end()) { @@ -63,12 +62,10 @@ string TimestampFormatter::changeTimestampFormat(string timestamp) { SWSS_LOG_ERROR("Timestamp month was given in wrong format.\n"); return ""; } - string day = dateComponents[2].str(); if(day.size() == 1) { // convert 1 -> 01 day.insert(day.begin(), '0'); } - string time = dateComponents[3].str(); string currentTimestamp = month + day + time; string year = getYear(currentTimestamp); @@ -76,3 +73,8 @@ string TimestampFormatter::changeTimestampFormat(string timestamp) { formattedTimestamp = year + "-" + month + "-" + day + "T" + time + "Z"; return formattedTimestamp; } + +TimestampFormatter::TimestampFormatter(string timestampFormatRegex) { + regex expr(timestampFormatRegex); + m_expression = expr; +} diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h index 95b8e7b15e17..593344243ca6 100644 --- a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h @@ -1,6 +1,6 @@ #ifndef TIMESTAMP_FORMATTER_H #define TIMESTAMP_FORMATTER_H - +#include #include #include #include @@ -13,11 +13,8 @@ class TimestampFormatter { public: - std::string changeTimestampFormat(std::string timestamp); - TimestampFormatter(std::string regexFormatString) { - std::regex 
expr(regexFormatString); - m_expression = expr; - } + std::string changeTimestampFormat(std::string message); + TimestampFormatter(std::string timestampFormatRegex); std::string m_storedTimestamp; std::string m_storedYear; private: diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 4d0363ed3834..6836a3082884 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -6,6 +6,7 @@ extern "C" } #include #include +#include #include #include "gtest/gtest.h" #include "json.hpp" @@ -21,11 +22,11 @@ using json = nlohmann::json; TEST(syslog_parser, matching_regex) { json jList = json::array(); vector testExpressions; - string regexString = "timestamp (.*) message (.*) other_data (.*)"; + string regexString = "message (.*) other_data (.*) even_more_data (.*)"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = { "timestamp", "message", "other_data" }; + jTest["params"] = { "message", "other_data", "even_more_data" }; jList.push_back(jTest); regex expression(regexString); testExpressions.push_back(expression); @@ -34,23 +35,56 @@ TEST(syslog_parser, matching_regex) { event_params_t paramDict; event_params_t expectedDict; - expectedDict["timestamp"] = "test_timestamp"; expectedDict["message"] = "test_message"; expectedDict["other_data"] = "test_data"; + expectedDict["even_more_data"] = "test_data"; - SyslogParser* parser = new SyslogParser(); + unique_ptr parser(new SyslogParser()); parser->m_expressions = testExpressions; parser->m_regexList = jList; lua_State* luaState = luaL_newstate(); luaL_openlibs(luaState); - bool success = parser->parseMessage("timestamp test_timestamp message test_message other_data test_data", tag, paramDict, luaState); + bool success = parser->parseMessage("message test_message other_data test_data even_more_data test_data", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); +} + +TEST(syslog_parser, matching_regex_timestamp) { + json jList = json::array(); + vector testExpressions; + string regexString = "message (.*) other_data (.*)"; + json jTest; + jTest["tag"] = "test_tag"; + jTest["regex"] = regexString; + jTest["params"] = { "message", "other_data" }; + jList.push_back(jTest); + regex expression(regexString); + testExpressions.push_back(expression); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["message"] = "test_message"; + expectedDict["other_data"] = "test_data"; + expectedDict["timestamp"] = "2022-07-21T02:10:00.000000Z"; + + unique_ptr parser(new SyslogParser()); + parser->m_expressions = testExpressions; + parser->m_regexList = jList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("Jul 21 02:10:00.000000 message test_message other_data test_data", tag, paramDict, luaState); EXPECT_EQ(true, success); EXPECT_EQ("test_tag", tag); EXPECT_EQ(expectedDict, paramDict); lua_close(luaState); - delete parser; } TEST(syslog_parser, no_matching_regex) { @@ -68,7 +102,7 @@ TEST(syslog_parser, no_matching_regex) { string tag; event_params_t paramDict; - SyslogParser* parser = new SyslogParser(); + unique_ptr parser(new SyslogParser()); parser->m_expressions = testExpressions; parser->m_regexList = jList; lua_State* luaState = luaL_newstate(); @@ -78,7 +112,6 @@ 
TEST(syslog_parser, no_matching_regex) { EXPECT_EQ(false, success); lua_close(luaState); - delete parser; } TEST(syslog_parser, lua_code_valid_1) { @@ -102,7 +135,7 @@ TEST(syslog_parser, lua_code_valid_1) { expectedDict["major-code"] = "2"; expectedDict["minor-code"] = "2"; - SyslogParser* parser = new SyslogParser(); + unique_ptr parser(new SyslogParser()); parser->m_expressions = testExpressions; parser->m_regexList = jList; lua_State* luaState = luaL_newstate(); @@ -114,7 +147,6 @@ TEST(syslog_parser, lua_code_valid_1) { EXPECT_EQ(expectedDict, paramDict); lua_close(luaState); - delete parser; } TEST(syslog_parser, lua_code_valid_2) { @@ -138,7 +170,7 @@ TEST(syslog_parser, lua_code_valid_2) { expectedDict["major-code"] = "6"; expectedDict["minor-code"] = "2"; - SyslogParser* parser = new SyslogParser(); + unique_ptr parser(new SyslogParser()); parser->m_expressions = testExpressions; parser->m_regexList = jList; lua_State* luaState = luaL_newstate(); @@ -150,43 +182,26 @@ TEST(syslog_parser, lua_code_valid_2) { EXPECT_EQ(expectedDict, paramDict); lua_close(luaState); - delete parser; - -} - -RsyslogPlugin* createPlugin(string path) { - RsyslogPlugin* plugin = new RsyslogPlugin("test_mod_name", path); - return plugin; -} - -TimestampFormatter* createFormatter() { - const string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; - TimestampFormatter* formatter = new TimestampFormatter(timestampFormatRegex); - return formatter; } TEST(rsyslog_plugin, onInit_emptyJSON) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_1.rc.json"); + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_1.rc.json")); EXPECT_NE(0, plugin->onInit()); - delete plugin; } TEST(rsyslog_plugin, onInit_missingRegex) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_3.rc.json"); + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_3.rc.json")); EXPECT_NE(0, plugin->onInit()); - delete plugin; } TEST(rsyslog_plugin, onInit_invalidRegex) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_4.rc.json"); + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_4.rc.json")); EXPECT_NE(0, plugin->onInit()); - delete plugin; } TEST(rsyslog_plugin, onMessage) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_2.rc.json"); + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_2.rc.json")); EXPECT_EQ(0, plugin->onInit()); - ifstream infile("./rsyslog_plugin_tests/test_syslogs.txt"); string logMessage; bool parseResult; @@ -197,13 +212,11 @@ TEST(rsyslog_plugin, onMessage) { } lua_close(luaState); infile.close(); - delete plugin; } TEST(rsyslog_plugin, onMessage_noParams) { - auto plugin = createPlugin("./rsyslog_plugin_tests/test_regex_5.rc.json"); + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_5.rc.json")); EXPECT_EQ(0, plugin->onInit()); - ifstream infile("./rsyslog_plugin_tests/test_syslogs_2.txt"); string logMessage; bool parseResult; @@ -214,11 +227,11 @@ TEST(rsyslog_plugin, onMessage_noParams) { } lua_close(luaState); infile.close(); - delete plugin; } TEST(timestampFormatter, changeTimestampFormat) { - auto formatter = createFormatter(); + string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; + unique_ptr formatter(new TimestampFormatter(timestampFormatRegex)); string timestampOne = "Jul 20 
10:09:40.230874"; string timestampTwo = "Jan 1 00:00:00.000000"; From 7c4c78d76ad2c0916c5fbed3a46ab34b25f414ef Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 22 Jul 2022 00:28:51 +0000 Subject: [PATCH 61/66] Remove print statement --- src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp | 2 +- src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index f82e0dad0dab..10afce381441 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -66,6 +66,6 @@ bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t } SyslogParser::SyslogParser() { - string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}).*"; + string timestampFormatRegex = "([a-zA-Z]{3})\\s*([0-9]{1,2})\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}).*"; m_timestampFormatter = unique_ptr(new TimestampFormatter(timestampFormatRegex)); } diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp index ef428ac0b808..99a412574dad 100644 --- a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp @@ -51,7 +51,7 @@ string TimestampFormatter::changeTimestampFormat(string message) { smatch dateComponents; string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ if(!regex_search(message, dateComponents, m_expression) || dateComponents.size() != 4) { //whole,month,day,time - printf("Timestamp unable to be broken down into components.\n"); + SWSS_LOG_ERROR("Timestamp unable to be broken down into components.\n"); return ""; // empty string is error } string month; From 52e15909c524493c04e0392f1318b8dda1b44c14 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 22 Jul 2022 23:30:31 +0000 Subject: [PATCH 62/66] Add peer review changes --- .../rsyslog_plugin/rsyslog_plugin.cpp | 7 +++- .../rsyslog_plugin/syslog_parser.cpp | 28 ++++++++-------- .../rsyslog_plugin/syslog_parser.h | 1 - .../rsyslog_plugin/timestamp_formatter.cpp | 26 ++++++--------- .../rsyslog_plugin/timestamp_formatter.h | 14 ++++---- .../rsyslog_plugin_ut.cpp | 32 +++++++++---------- 6 files changed, 53 insertions(+), 55 deletions(-) diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp index 85604424b7f5..d40f5f00f3a8 100644 --- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -44,9 +44,14 @@ bool RsyslogPlugin::createRegexList() { for(long unsigned int i = 0; i < m_parser->m_regexList.size(); i++) { try { - regexString = m_parser->m_regexList[i]["regex"]; + string timestampRegex = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*"; + string eventRegex = m_parser->m_regexList[i]["regex"]; + regexString = timestampRegex + eventRegex; string tag = m_parser->m_regexList[i]["tag"]; vector params = m_parser->m_regexList[i]["params"]; + vector timestampParams = { "month", "day", "time" }; + params.insert(params.begin(), timestampParams.begin(), timestampParams.end()); + m_parser->m_regexList[i]["params"] = params; regex expr(regexString); expression = expr; } catch (domain_error& deException) { diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp 
b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp index 10afce381441..5fe043e76ca2 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -11,26 +11,26 @@ * */ -void SyslogParser::addTimestamp(string message, event_params_t& paramMap) { - string formattedTimestamp = m_timestampFormatter->changeTimestampFormat(message); - if(formattedTimestamp.empty()) { - SWSS_LOG_ERROR("Message does not contain valid timestamp and cannot be formatted: %s.\n", message.c_str()); - return; - } - paramMap["timestamp"] = formattedTimestamp; -} - bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap, lua_State* luaState) { for(long unsigned int i = 0; i < m_regexList.size(); i++) { smatch matchResults; vector params = m_regexList[i]["params"]; - if(!regex_search(message, matchResults, m_expressions[i]) || params.size() != matchResults.size() - 1) { + if(!regex_search(message, matchResults, m_expressions[i]) || params.size() != matchResults.size() - 1 || matchResults.size() < 4) { continue; } + + if(!matchResults[1].str().empty() && !matchResults[2].str().empty() && !matchResults[3].str().empty()) { // found timestamp components + string formattedTimestamp = m_timestampFormatter->changeTimestampFormat({ matchResults[1].str(), matchResults[2].str(), matchResults[3].str() }); + if(!formattedTimestamp.empty()) { + paramMap["timestamp"] = formattedTimestamp; + } else { + SWSS_LOG_ERROR("Timestamp is invalid and is not able to be formatted"); + } + } // found matching regex eventTag = m_regexList[i]["tag"]; - // check params for lua code - for(long unsigned int j = 0; j < params.size(); j++) { + // check params for lua code + for(long unsigned int j = 3; j < params.size(); j++) { auto delimPos = params[j].find(':'); string resultValue = matchResults[j + 1].str(); if(delimPos == string::npos) { // no lua code @@ -59,13 +59,11 @@ bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t paramMap[param] = lua_tostring(luaState, -1); lua_pop(luaState, 1); } - addTimestamp(message, paramMap); return true; } return false; } SyslogParser::SyslogParser() { - string timestampFormatRegex = "([a-zA-Z]{3})\\s*([0-9]{1,2})\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6}).*"; - m_timestampFormatter = unique_ptr(new TimestampFormatter(timestampFormatRegex)); + m_timestampFormatter = unique_ptr(new TimestampFormatter()); } diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h index 9cb28126188e..5ae2a04b7567 100644 --- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -29,7 +29,6 @@ class SyslogParser { unique_ptr m_timestampFormatter; vector m_expressions; json m_regexList = json::array(); - void addTimestamp(string message, event_params_t& paramDict); bool parseMessage(string message, string& tag, event_params_t& paramDict, lua_State* luaState); SyslogParser(); }; diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp index 99a412574dad..cc179adbbc75 100644 --- a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp @@ -1,6 +1,7 @@ #include #include "timestamp_formatter.h" #include "logger.h" +#include "events.h" using namespace std; @@ -13,7 +14,7 @@ using namespace std; * */ -const unordered_map g_monthDict { +static const unordered_map g_monthDict { { "Jan", 
"01" }, { "Feb", "02" }, { "Mar", "03" }, @@ -47,34 +48,27 @@ string TimestampFormatter::getYear(string timestamp) { return year; } -string TimestampFormatter::changeTimestampFormat(string message) { - smatch dateComponents; - string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ - if(!regex_search(message, dateComponents, m_expression) || dateComponents.size() != 4) { //whole,month,day,time - SWSS_LOG_ERROR("Timestamp unable to be broken down into components.\n"); - return ""; // empty string is error +string TimestampFormatter::changeTimestampFormat(vector dateComponents) { + if(dateComponents.size() < 3) { + SWSS_LOG_ERROR("Timestamp formatter unable to format due to invalid input"); + return ""; } + string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ string month; - auto it = g_monthDict.find(dateComponents[1].str()); + auto it = g_monthDict.find(dateComponents[0]); if(it != g_monthDict.end()) { month = it->second; } else { SWSS_LOG_ERROR("Timestamp month was given in wrong format.\n"); return ""; } - string day = dateComponents[2].str(); + string day = dateComponents[1]; if(day.size() == 1) { // convert 1 -> 01 day.insert(day.begin(), '0'); } - string time = dateComponents[3].str(); + string time = dateComponents[2]; string currentTimestamp = month + day + time; string year = getYear(currentTimestamp); - formattedTimestamp = year + "-" + month + "-" + day + "T" + time + "Z"; return formattedTimestamp; } - -TimestampFormatter::TimestampFormatter(string timestampFormatRegex) { - regex expr(timestampFormatRegex); - m_expression = expr; -} diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h index 593344243ca6..ea99c4cfcb8c 100644 --- a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h @@ -1,9 +1,13 @@ #ifndef TIMESTAMP_FORMATTER_H #define TIMESTAMP_FORMATTER_H + #include #include #include #include +#include + +using namespace std; /*** * @@ -13,13 +17,11 @@ class TimestampFormatter { public: - std::string changeTimestampFormat(std::string message); - TimestampFormatter(std::string timestampFormatRegex); - std::string m_storedTimestamp; - std::string m_storedYear; + string changeTimestampFormat(vector dateComponents); + string m_storedTimestamp; + string m_storedYear; private: - std::regex m_expression; - std::string getYear(std::string timestamp); + string getYear(string timestamp); }; #endif diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp index 6836a3082884..82369039d823 100644 --- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -22,11 +22,11 @@ using json = nlohmann::json; TEST(syslog_parser, matching_regex) { json jList = json::array(); vector testExpressions; - string regexString = "message (.*) other_data (.*) even_more_data (.*)"; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*) even_more_data (.*)"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = { "message", "other_data", "even_more_data" }; + jTest["params"] = { "month", "day", "time", "message", "other_data", "even_more_data" }; jList.push_back(jTest); regex expression(regexString); 
testExpressions.push_back(expression); @@ -56,11 +56,11 @@ TEST(syslog_parser, matching_regex) { TEST(syslog_parser, matching_regex_timestamp) { json jList = json::array(); vector testExpressions; - string regexString = "message (.*) other_data (.*)"; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*)"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = { "message", "other_data" }; + jTest["params"] = { "month", "day", "time", "message", "other_data" }; jList.push_back(jTest); regex expression(regexString); testExpressions.push_back(expression); @@ -90,11 +90,11 @@ TEST(syslog_parser, matching_regex_timestamp) { TEST(syslog_parser, no_matching_regex) { json jList = json::array(); vector testExpressions; - string regexString = "no match"; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\s*no match"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = vector(); + jTest["params"] = { "month", "day", "time" }; jList.push_back(jTest); regex expression(regexString); testExpressions.push_back(expression); @@ -117,11 +117,11 @@ TEST(syslog_parser, no_matching_regex) { TEST(syslog_parser, lua_code_valid_1) { json jList = json::array(); vector testExpressions; - string regexString = ".* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = { "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; + jTest["params"] = { "month", "day", "time", "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; jList.push_back(jTest); regex expression(regexString); testExpressions.push_back(expression); @@ -152,11 +152,11 @@ TEST(syslog_parser, lua_code_valid_1) { TEST(syslog_parser, lua_code_valid_2) { json jList = json::array(); vector testExpressions; - string regexString = ".* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + string regexString = "([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; json jTest; jTest["tag"] = "test_tag"; jTest["regex"] = regexString; - jTest["params"] = { "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; + jTest["params"] = { "month", "day", "time", "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" }; jList.push_back(jTest); regex expression(regexString); testExpressions.push_back(expression); @@ -169,6 +169,7 @@ TEST(syslog_parser, lua_code_valid_2) { expectedDict["ip"] = "10.10.24.216"; expectedDict["major-code"] = "6"; expectedDict["minor-code"] = "2"; + expectedDict["timestamp"] = "2022-12-03T12:36:24.503424Z"; unique_ptr parser(new SyslogParser()); parser->m_expressions = testExpressions; @@ -176,7 +177,7 @@ TEST(syslog_parser, lua_code_valid_2) { lua_State* luaState = luaL_newstate(); luaL_openlibs(luaState); - bool success = parser->parseMessage("NOTIFICATION: received from neighbor 10.10.24.216 active 6/2 
(Administrative Shutdown) 0 bytes", tag, paramDict, luaState); + bool success = parser->parseMessage("Dec 3 12:36:24.503424 NOTIFICATION: received from neighbor 10.10.24.216 active 6/2 (Administrative Shutdown) 0 bytes", tag, paramDict, luaState); EXPECT_EQ(true, success); EXPECT_EQ("test_tag", tag); EXPECT_EQ(expectedDict, paramDict); @@ -230,12 +231,11 @@ TEST(rsyslog_plugin, onMessage_noParams) { } TEST(timestampFormatter, changeTimestampFormat) { - string timestampFormatRegex = "([a-zA-Z]{3}) ([0-9]{1,2}) ([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})"; - unique_ptr formatter(new TimestampFormatter(timestampFormatRegex)); + unique_ptr formatter(new TimestampFormatter()); - string timestampOne = "Jul 20 10:09:40.230874"; - string timestampTwo = "Jan 1 00:00:00.000000"; - string timestampThree = "Dec 31 23:59:59.000000"; + vector timestampOne = { "Jul", "20", "10:09:40.230874" }; + vector timestampTwo = { "Jan", "1", "00:00:00.000000" }; + vector timestampThree = { "Dec", "31", "23:59:59.000000" }; string formattedTimestampOne = formatter->changeTimestampFormat(timestampOne); EXPECT_EQ("2022-07-20T10:09:40.230874Z", formattedTimestampOne); From 5605b20e4084e1a5fd6ad09848a7e7597b4adbcd Mon Sep 17 00:00:00 2001 From: Renuka Manavalan Date: Wed, 17 Aug 2022 21:00:45 +0000 Subject: [PATCH 63/66] stats & options update --- rules/docker-eventd.mk | 1 + slave.mk | 3 +- src/sonic-eventd/src/eventd.cpp | 345 ++++++++++++++++-- src/sonic-eventd/src/eventd.h | 131 ++++++- src/sonic-eventd/src/main.cpp | 2 + src/sonic-eventd/tests/eventd_ut.cpp | 309 ++++++++++++++-- src/sonic-eventd/tests/main.cpp | 87 +++++ .../database_config.json | 112 ++++++ .../database_config0.json | 92 +++++ .../database_config1.json | 92 +++++ .../database_global.json | 16 + src/sonic-eventd/tools/events_tool.cpp | 9 +- 12 files changed, 1114 insertions(+), 85 deletions(-) create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config0.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config1.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_global.json diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk index 5a546aca1cb5..c69fee09e569 100644 --- a/rules/docker-eventd.mk +++ b/rules/docker-eventd.mk @@ -43,4 +43,5 @@ $(DOCKER_EVENTD)_PLUGIN = rsyslog_plugin $($(DOCKER_EVENTD)_PLUGIN)_PATH = $($(DOCKER_EVENTD)_FILESPATH) SONIC_COPY_FILES += $($(DOCKER_EVENTD)_PLUGIN) +$(DOCKER_EVENTD)_SHARED_FILES = $($(DOCKER_EVENTD)_PLUGIN) diff --git a/slave.mk b/slave.mk index e6102120785f..bad0083e206f 100644 --- a/slave.mk +++ b/slave.mk @@ -1154,7 +1154,6 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_INSTALLERS)) : $(TARGET_PATH)/% : \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_YANG_MODELS_PY3)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_CTRMGRD)) \ $(addprefix $(FILES_PATH)/,$($(SONIC_CTRMGRD)_FILES)) \ - $(addprefix $(FILES_PATH)/,$($(DOCKER_EVENTD)_PLUGIN)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_YANG_MGMT_PY3)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SYSTEM_HEALTH)) \ $(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_HOST_SERVICES_PY3)) @@ -1293,6 +1292,8 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_INSTALLERS)) : $(TARGET_PATH)/% : \ $(if $($(docker:-dbg.gz=.gz)_MACHINE),\ mv $($(docker:-dbg.gz=.gz)_CONTAINER_NAME).sh $($(docker:-dbg.gz=.gz)_MACHINE)_$($(docker:-dbg.gz=.gz)_CONTAINER_NAME).sh ) + $(foreach file, $($(docker)_SHARED_FILES), \ + { cp $($(file)_PATH)/$(file) 
$(FILES_PATH)/ $(LOG) || exit 1 ; } ; ) ) # Exported variables are used by sonic_debian_extension.sh diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp index 9b164fbe38b3..1ff9dd8be20b 100644 --- a/src/sonic-eventd/src/eventd.cpp +++ b/src/sonic-eventd/src/eventd.cpp @@ -1,18 +1,30 @@ #include #include "eventd.h" +#include "dbconnector.h" /* - * There are 3 threads, including the main + * There are 5 threads, including the main * - * main thread -- Runs eventd service that accepts commands event_req_type_t + * (0) main thread -- Runs eventd service that accepts commands event_req_type_t * This can be used to control caching events and a no-op echo service. * - * capture/cache service - * Saves all the events between cache start & stop + * (1) capture/cache service + * Saves all the events between cache start & stop. + * Update missed cached counter in memory. + * + * (2) Main proxy service that runs XSUB/XPUB ends + * + * (3) Get stats for total published counter in memory. This thread also sends + * heartbeat message. It accomplishes by counting upon receive missed due + * to event receive timeout. + * + * (4) Thread to update counters from memory to redis periodically. * - * Main proxy service that runs XSUB/XPUB ends */ +using namespace std; +using namespace swss; + #define MB(N) ((N) * 1024 * 1024) #define EVT_SIZE_AVG 150 @@ -23,6 +35,23 @@ #define VEC_SIZE(p) ((int)p.size()) +/* Sock read timeout in milliseconds, to enable look for control signals */ +#define CAPTURE_SOCK_TIMEOUT 800 + +#define HEARTBEAT_INTERVAL_SECS 2 /* Default: 2 seconds */ + +/* Source & tag for heartbeat events */ +#define EVENTD_PUBLISHER_SOURCE "sonic-events-eventd" +#define EVENTD_HEARTBEAT_TAG "heartbeat" + + +const char *counter_keys[COUNTERS_EVENTS_TOTAL] = { + COUNTERS_EVENTS_PUBLISHED, + COUNTERS_EVENTS_MISSED_CACHE +}; + +static bool s_unit_testing = false; + int eventd_proxy::init() { @@ -65,6 +94,193 @@ eventd_proxy::run() } +stats_collector::stats_collector() : + m_shutdown(false), m_pause_heartbeat(false), m_heartbeats_published(0), + m_heartbeats_interval_cnt(0) +{ + set_heartbeat_interval(HEARTBEAT_INTERVAL_SECS); + for (int i=0; i < COUNTERS_EVENTS_TOTAL; ++i) { + m_lst_counters[i] = 0; + } + m_updated = false; +} + + +void +stats_collector::set_heartbeat_interval(int val) +{ + if (val > 0) { + /* Round to highest possible multiples of MIN */ + m_heartbeats_interval_cnt = + (((val * 1000) + STATS_HEARTBEAT_MIN - 1) / STATS_HEARTBEAT_MIN); + } + else if (val == 0) { + /* Least possible */ + m_heartbeats_interval_cnt = 1; + } + else if (val == -1) { + /* Turn off heartbeat */ + m_heartbeats_interval_cnt = 0; + SWSS_LOG_INFO("Heartbeat turned OFF"); + } + /* Any other value is ignored as invalid */ + + SWSS_LOG_INFO("Set heartbeat: val=%d secs cnt=%d min=%d ms final=%d secs", + val, m_heartbeats_interval_cnt, STATS_HEARTBEAT_MIN, + (m_heartbeats_interval_cnt * STATS_HEARTBEAT_MIN / 1000)); +} + + +int +stats_collector::get_heartbeat_interval() +{ + return m_heartbeats_interval_cnt * STATS_HEARTBEAT_MIN / 1000; +} + +int +stats_collector::start() +{ + int rc = -1; + + if (!s_unit_testing) { + try { + m_counters_db = make_shared("COUNTERS_DB", 0, true); + } + catch (exception &e) + { + SWSS_LOG_ERROR("Unable to get DB Connector, e=(%s)\n", e.what()); + } + RET_ON_ERR(m_counters_db != NULL, "Failed to get COUNTERS_DB"); + + m_stats_table = make_shared( + m_counters_db.get(), COUNTERS_EVENTS_TABLE); + RET_ON_ERR(m_stats_table != NULL, "Failed to get events table"); + + 
m_thr_writer = thread(&stats_collector::run_writer, this); + } + m_thr_collector = thread(&stats_collector::run_collector, this); + rc = 0; +out: + return rc; +} + +void +stats_collector::run_writer() +{ + while (true) { + if (m_updated.exchange(false)) { + /* Update if there had been any update */ + + for (int i = 0; i < COUNTERS_EVENTS_TOTAL; ++i) { + vector fv; + + fv.emplace_back(EVENTS_STATS_FIELD_NAME, to_string(m_lst_counters[i])); + + m_stats_table->set(counter_keys[i], fv); + } + } + if (m_shutdown) { + break; + } + this_thread::sleep_for(chrono::milliseconds(10)); + /* + * After sleep always do an update if needed before checking + * shutdown flag, as any counters collected during sleep + * needs to be updated. + */ + } + + m_stats_table.reset(); + m_counters_db.reset(); +} + +void +stats_collector::run_collector() +{ + int hb_cntr = 0; + string hb_key = string(EVENTD_PUBLISHER_SOURCE) + ":" + EVENTD_HEARTBEAT_TAG; + event_handle_t pub_handle = NULL; + event_handle_t subs_handle = NULL; + + /* + * A subscriber is required to set a subscription. Else all published + * events will be dropped at the point of publishing itself. + */ + pub_handle = events_init_publisher(EVENTD_PUBLISHER_SOURCE); + RET_ON_ERR(pub_handle != NULL, + "failed to create publisher handle for heartbeats"); + + subs_handle = events_init_subscriber(false, STATS_HEARTBEAT_MIN); + RET_ON_ERR(subs_handle != NULL, "failed to subscribe to all"); + + /* + * Though we can count off of capture socket, then we need to duplicate + * code in event_receive which has the logic to count all missed per + * runtime id. It also has logic to retire closed runtime IDs. + * + * So use regular subscriber API w/o cache but timeout to enable + * exit, upon shutdown. + */ + /* + * The collector service runs until shutdown. + * The only task is to update total_published & total_missed_internal. + * The write of these counters into redis is done by another thread. + */ + + while(!m_shutdown) { + event_receive_op_t op; + int rc = 0; + + try { + rc = event_receive(subs_handle, op); + } + catch (exception& e) + { + rc = -1; + stringstream ss; + ss << e.what(); + SWSS_LOG_ERROR("Receive event failed with %s", ss.str().c_str()); + } + + if ((rc == 0) && (op.key != hb_key)) { + /* TODO: Discount EVENT_STR_CTRL_DEINIT messages too */ + increment_published(1+op.missed_cnt); + + /* reset counter on receive to restart. */ + hb_cntr = 0; + } + else { + if (rc < 0) { + SWSS_LOG_ERROR( + "event_receive failed with rc=%d; stats:published(%lu)", rc, + m_lst_counters[INDEX_COUNTERS_EVENTS_PUBLISHED]); + } + if (!m_pause_heartbeat && (m_heartbeats_interval_cnt > 0) && + ++hb_cntr >= m_heartbeats_interval_cnt) { + rc = event_publish(pub_handle, EVENTD_HEARTBEAT_TAG); + if (rc != 0) { + SWSS_LOG_ERROR("Failed to publish heartbeat rc=%d", rc); + } + hb_cntr = 0; + ++m_heartbeats_published; + } + } + } + +out: + /* + * NOTE: A shutdown could lose messages in cache. + * But consider, that eventd shutdown is a critical shutdown as it would + * bring down all other features. Hence done only at system level shutdown, + * hence losing few messages in flight is acceptable. Any more complex code + * to handle is unwanted. 
+ */ + + events_deinit_subscriber(subs_handle); + events_deinit_publisher(pub_handle); + m_shutdown = true; +} + capture_service::~capture_service() { stop_capture(); @@ -110,10 +326,6 @@ validate_event(const internal_event_t &event, runtime_id_t &rid, sequence_t &seq void capture_service::init_capture_cache(const event_serialized_lst_t &lst) { - /* clean any pre-existing cache */ - runtime_id_t rid; - sequence_t seq; - /* Cache given events as initial stock. * Save runtime ID with last seen seq to avoid duplicates, while reading * from capture socket. @@ -123,14 +335,14 @@ capture_service::init_capture_cache(const event_serialized_lst_t &lst) internal_event_t event; if (deserialize(*itc, event) == 0) { + runtime_id_t rid; + sequence_t seq; + if (validate_event(event, rid, seq)) { m_pre_exist_id[rid] = seq; m_events.push_back(*itc); } } - else { - SWSS_LOG_ERROR("failed to serialize cache message from subscriber; DROP"); - } } } @@ -139,10 +351,10 @@ void capture_service::do_capture() { int rc; - int block_ms=300; + int block_ms=CAPTURE_SOCK_TIMEOUT; int init_cnt; - event_handle_t subs_handle = NULL; - void *sock = NULL; + void *cap_sub_sock = NULL; + counters_t total_overflow = 0; typedef enum { /* @@ -161,24 +373,20 @@ capture_service::do_capture() cap_state_t cap_state = CAP_STATE_INIT; /* - * Need subscription for publishers to publish. Start one. - * As we are reading off of capture socket, we don't read from - * this handle. Not reading is a not a concern, as zmq will cache - * few initial messages and rest it will drop. + * Need subscription for publishers to publish. + * The stats collector service already has active subscriber for all. */ - subs_handle = events_init_subscriber(); - RET_ON_ERR(subs_handle != NULL, "failed to subscribe to all"); - sock = zmq_socket(m_ctx, ZMQ_SUB); - RET_ON_ERR(sock != NULL, "failing to get ZMQ_SUB socket"); + cap_sub_sock = zmq_socket(m_ctx, ZMQ_SUB); + RET_ON_ERR(cap_sub_sock != NULL, "failing to get ZMQ_SUB socket"); - rc = zmq_connect(sock, get_config(string(CAPTURE_END_KEY)).c_str()); + rc = zmq_connect(cap_sub_sock, get_config(string(CAPTURE_END_KEY)).c_str()); RET_ON_ERR(rc == 0, "Failing to bind capture SUB to %s", get_config(string(CAPTURE_END_KEY)).c_str()); - rc = zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0); + rc = zmq_setsockopt(cap_sub_sock, ZMQ_SUBSCRIBE, "", 0); RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE"); - rc = zmq_setsockopt(sock, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms)); + rc = zmq_setsockopt(cap_sub_sock, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms)); RET_ON_ERR(rc == 0, "Failed to ZMQ_RCVTIMEO to %d", block_ms); m_cap_run = true; @@ -210,7 +418,7 @@ capture_service::do_capture() internal_event_t event; string source, evt_str; - if ((rc = zmq_message_read(sock, 0, source, event)) != 0) { + if ((rc = zmq_message_read(cap_sub_sock, 0, source, event)) != 0) { /* * The capture socket captures SUBSCRIBE requests too. * The messge could contain subscribe filter strings and binary code. 
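
A minimal sketch of the timeout-driven receive pattern the capture loop above relies on: ZMQ_RCVTIMEO makes the otherwise-blocking read return periodically so the loop can re-check its control state. Endpoint and flag names here are placeholders, not the daemon's real configuration:

    #include <errno.h>
    #include <zmq.h>

    void *ctx = zmq_ctx_new();
    void *sub = zmq_socket(ctx, ZMQ_SUB);
    int timeout_ms = 800;                               // same order as CAPTURE_SOCK_TIMEOUT
    zmq_connect(sub, "tcp://127.0.0.1:5572");           // placeholder capture endpoint
    zmq_setsockopt(sub, ZMQ_SUBSCRIBE, "", 0);          // receive everything
    zmq_setsockopt(sub, ZMQ_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));
    while (keep_running) {                              // placeholder control flag
        char buf[256];
        int n = zmq_recv(sub, buf, sizeof(buf), 0);
        if (n == -1) {
            if (errno == EAGAIN) continue;              // timed out: loop back and re-check flags
            break;                                      // real error, e.g. context terminated
        }
        // handle the n-byte frame in buf ...
    }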
@@ -268,6 +476,9 @@ capture_service::do_capture() m_events.push_back(evt_str); if (VEC_SIZE(m_events) >= m_cache_max) { cap_state = CAP_STATE_LAST; + /* Clear the map, created to ensure memory space available */ + m_last_events.clear(); + m_last_events_init = true; } break; } @@ -282,12 +493,12 @@ capture_service::do_capture() } case CAP_STATE_LAST: - if (!m_last_events_init) { - /* Clear the map, created to ensure memory space available */ - m_last_events.clear(); - m_last_events_init = true; - } + total_overflow++; m_last_events[rid] = evt_str; + if (total_overflow > m_last_events.size()) { + m_total_missed_cache++; + m_stats_instance->increment_missed_cache(1); + } break; } } @@ -297,8 +508,7 @@ capture_service::do_capture() * Capture stop will close the socket which fail the read * and hence bail out. */ - events_deinit_subscriber(subs_handle); - zmq_close(sock); + zmq_close(cap_sub_sock); m_cap_run = false; return; } @@ -365,7 +575,7 @@ capture_service::set_control(capture_control_t ctrl, event_serialized_lst_t *lst int capture_service::read_cache(event_serialized_lst_t &lst_fifo, - last_events_t &lst_last) + last_events_t &lst_last, counters_t &overflow_cnt) { lst_fifo.swap(m_events); if (m_last_events_init) { @@ -375,9 +585,37 @@ capture_service::read_cache(event_serialized_lst_t &lst_fifo, } last_events_t().swap(m_last_events); event_serialized_lst_t().swap(m_events); + overflow_cnt = m_total_missed_cache; return 0; } +static int +process_options(stats_collector *stats, const event_serialized_lst_t &req_data, + event_serialized_lst_t &resp_data) +{ + int ret = -1; + if (!req_data.empty()) { + RET_ON_ERR(req_data.size() == 1, "Expect only one options string %d", + (int)req_data.size()); + const auto &data = nlohmann::json::parse(*(req_data.begin())); + RET_ON_ERR(data.size() == 1, "Only one supported option. Expect 1. size=%d", + (int)data.size()); + const auto it = data.find(GLOBAL_OPTION_HEARTBEAT); + RET_ON_ERR(it != data.end(), "Expect HEARTBEAT_INTERVAL; got %s", + data.begin().key().c_str()); + stats->set_heartbeat_interval(it.value()); + ret = 0; + } + else { + nlohmann::json msg = nlohmann::json::object(); + msg[GLOBAL_OPTION_HEARTBEAT] = stats->get_heartbeat_interval(); + resp_data.push_back(msg.dump()); + ret = 0; + } +out: + return ret; +} + void run_eventd_service() @@ -385,13 +623,14 @@ run_eventd_service() int code = 0; int cache_max; event_service service; + stats_collector stats_instance; eventd_proxy *proxy = NULL; capture_service *capture = NULL; event_serialized_lst_t capture_fifo_events; last_events_t capture_last_events; - SWSS_LOG_ERROR("Eventd service starting\n"); + SWSS_LOG_INFO("Eventd service starting\n"); void *zctx = zmq_ctx_new(); RET_ON_ERR(zctx != NULL, "Failed to get zmq ctx"); @@ -406,15 +645,23 @@ run_eventd_service() RET_ON_ERR(service.init_server(zctx) == 0, "Failed to init service"); + RET_ON_ERR(stats_instance.start() == 0, "Failed to start stats collector"); + + /* Pause heartbeat during caching */ + stats_instance.heartbeat_ctrl(true); + /* * Start cache service, right upon eventd starts so as not to lose * events until telemetry starts. 
* Telemetry will send a stop & collect cache upon startup */ - capture = new capture_service(zctx, cache_max); + capture = new capture_service(zctx, cache_max, &stats_instance); RET_ON_ERR(capture->set_control(INIT_CAPTURE) == 0, "Failed to init capture"); RET_ON_ERR(capture->set_control(START_CAPTURE) == 0, "Failed to start capture"); + this_thread::sleep_for(chrono::milliseconds(200)); + RET_ON_ERR(stats_instance.is_running(), "Failed to start stats instance"); + while(code != EVENT_EXIT) { int resp = -1; event_serialized_lst_t req_data, resp_data; @@ -431,7 +678,7 @@ run_eventd_service() event_serialized_lst_t().swap(capture_fifo_events); last_events_t().swap(capture_last_events); - capture = new capture_service(zctx, cache_max); + capture = new capture_service(zctx, cache_max, &stats_instance); if (capture != NULL) { resp = capture->set_control(INIT_CAPTURE); } @@ -444,6 +691,9 @@ run_eventd_service() resp = -1; break; } + /* Pause heartbeat during caching */ + stats_instance.heartbeat_ctrl(true); + resp = capture->set_control(START_CAPTURE, &req_data); break; @@ -456,10 +706,15 @@ run_eventd_service() } resp = capture->set_control(STOP_CAPTURE); if (resp == 0) { - resp = capture->read_cache(capture_fifo_events, capture_last_events); + counters_t overflow; + resp = capture->read_cache(capture_fifo_events, capture_last_events, + overflow); } delete capture; capture = NULL; + + /* Unpause heartbeat upon stop caching */ + stats_instance.heartbeat_ctrl(); break; @@ -503,6 +758,10 @@ run_eventd_service() resp_data.swap(req_data); break; + case EVENT_OPTIONS: + resp = process_options(&stats_instance, req_data, resp_data); + break; + case EVENT_EXIT: resp = 0; break; @@ -517,6 +776,8 @@ run_eventd_service() } out: service.close_service(); + stats_instance.stop(); + if (proxy != NULL) { delete proxy; } @@ -529,3 +790,9 @@ run_eventd_service() SWSS_LOG_ERROR("Eventd service exiting\n"); } +void set_unit_testing(bool b) +{ + s_unit_testing = b; +} + + diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h index 6273497b0cc2..8411223b35be 100644 --- a/src/sonic-eventd/src/eventd.h +++ b/src/sonic-eventd/src/eventd.h @@ -1,15 +1,31 @@ /* * Header file for eventd daemon */ +#include "table.h" #include "events_service.h" #include "events.h" +#include "events_wrap.h" + +#define ARRAY_SIZE(l) (sizeof(l)/sizeof((l)[0])) typedef map last_events_t; +/* stat counters */ +typedef uint64_t counters_t; + +typedef enum { + INDEX_COUNTERS_EVENTS_PUBLISHED, + INDEX_COUNTERS_EVENTS_MISSED_CACHE, + COUNTERS_EVENTS_TOTAL +} stats_counter_index_t; + +#define EVENTS_STATS_FIELD_NAME "value" +#define STATS_HEARTBEAT_MIN 300 + /* * Started by eventd_service. * Creates XPUB & XSUB end points. - onicanalytics.azurecr.io Bind the same + * Bind the same * Create a PUB socket end point for capture and bind. * Call run_proxy method with sockets in a dedicated thread. * Thread runs forever until the zmq context is terminated. 
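
A minimal sketch of the XSUB/XPUB proxy wiring described in the comment above; the endpoint strings are placeholders (the daemon resolves its real endpoints via get_config()):

    #include <zmq.h>

    void *zctx = zmq_ctx_new();
    void *frontend = zmq_socket(zctx, ZMQ_XSUB);   // event publishers connect here
    zmq_bind(frontend, "tcp://127.0.0.1:5570");
    void *backend = zmq_socket(zctx, ZMQ_XPUB);    // event subscribers connect here
    zmq_bind(backend, "tcp://127.0.0.1:5571");
    void *capture = zmq_socket(zctx, ZMQ_PUB);     // every proxied message is copied here
    zmq_bind(capture, "tcp://127.0.0.1:5572");
    zmq_proxy(frontend, backend, capture);         // blocks until zctx is terminated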
@@ -42,6 +58,104 @@ class eventd_proxy }; +class stats_collector +{ + public: + stats_collector(); + + ~stats_collector() { stop(); } + + int start(); + + void stop() { + + m_shutdown = true; + + if (m_thr_collector.joinable()) { + m_thr_collector.join(); + } + + if (m_thr_writer.joinable()) { + m_thr_writer.join(); + } + } + + void increment_published(counters_t val) { + _update_stats(INDEX_COUNTERS_EVENTS_PUBLISHED, val); + } + + void increment_missed_cache(counters_t val) { + _update_stats(INDEX_COUNTERS_EVENTS_MISSED_CACHE, val); + } + + counters_t read_counter(stats_counter_index_t index) { + if (index != COUNTERS_EVENTS_TOTAL) { + return m_lst_counters[index]; + } + else { + return 0; + } + } + + /* Sets heartbeat interval in milliseconds */ + void set_heartbeat_interval(int val_in_ms); + + /* + * Get heartbeat interval in milliseconds + * NOTE: Set & get value may not match as the value is rounded + * to a multiple of smallest possible interval. + */ + int get_heartbeat_interval(); + + /* A way to pause heartbeat */ + void heartbeat_ctrl(bool pause = false) { + m_pause_heartbeat = pause; + SWSS_LOG_INFO("Set heartbeat_ctrl pause=%d", pause); + } + + uint64_t heartbeats_published() const { + return m_heartbeats_published; + } + + bool is_running() + { + return !m_shutdown; + } + + private: + void _update_stats(stats_counter_index_t index, counters_t val) { + if (index != COUNTERS_EVENTS_TOTAL) { + m_lst_counters[index] += val; + m_updated = true; + } + else { + SWSS_LOG_ERROR("Internal code error. Invalid index=%d", index); + } + } + + void run_collector(); + + void run_writer(); + + atomic m_updated; + + counters_t m_lst_counters[COUNTERS_EVENTS_TOTAL]; + + bool m_shutdown; + + thread m_thr_collector; + thread m_thr_writer; + + shared_ptr m_counters_db; + shared_ptr m_stats_table; + + bool m_pause_heartbeat; + + uint64_t m_heartbeats_published; + + int m_heartbeats_interval_cnt; +}; + /* * Capture/Cache service * @@ -94,8 +208,10 @@ typedef enum { class capture_service { public: - capture_service(void *ctx, int cache_max) : m_ctx(ctx), m_cap_run(false), - m_ctrl(NEED_INIT), m_cache_max(cache_max), m_last_events_init(false) + capture_service(void *ctx, int cache_max, stats_collector *stats) : + m_ctx(ctx), m_stats_instance(stats), m_cap_run(false), + m_ctrl(NEED_INIT), m_cache_max(cache_max), + m_last_events_init(false), m_total_missed_cache(0) {} ~capture_service(); @@ -103,7 +219,7 @@ class capture_service int set_control(capture_control_t ctrl, event_serialized_lst_t *p=NULL); int read_cache(event_serialized_lst_t &lst_fifo, - last_events_t &lst_last); + last_events_t &lst_last, counters_t &overflow_cnt); private: void init_capture_cache(const event_serialized_lst_t &lst); @@ -112,6 +228,8 @@ class capture_service void stop_capture(); void *m_ctx; + stats_collector *m_stats_instance; + bool m_cap_run; capture_control_t m_ctrl; thread m_thr; @@ -126,6 +244,8 @@ class capture_service typedef map pre_exist_id_t; pre_exist_id_t m_pre_exist_id; + counters_t m_total_missed_cache; + }; @@ -144,4 +264,5 @@ class capture_service */ void run_eventd_service(); - +/* To help skip redis access during unit testing */ +void set_unit_testing(bool b); diff --git a/src/sonic-eventd/src/main.cpp b/src/sonic-eventd/src/main.cpp index ea058d5fac14..7a20497f0986 100644 --- a/src/sonic-eventd/src/main.cpp +++ b/src/sonic-eventd/src/main.cpp @@ -5,7 +5,9 @@ void run_eventd_service(); int main() { + swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); SWSS_LOG_INFO("The eventd service started"); + 
SWSS_LOG_ERROR("ERR:The eventd service started"); run_eventd_service(); diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp index d6f07f23de0f..399255edb2b8 100644 --- a/src/sonic-eventd/tests/eventd_ut.cpp +++ b/src/sonic-eventd/tests/eventd_ut.cpp @@ -11,8 +11,10 @@ #include "../src/eventd.h" using namespace std; +using namespace swss; -#define ARRAY_SIZE(p) ((int)(sizeof(p) / sizeof((p)[0]))) +extern bool g_is_redis_available; +extern const char *counter_keys[]; typedef struct { int id; @@ -28,16 +30,8 @@ internal_event_t create_ev(const test_data_t &data) { internal_event_t event_data; - { - string param_str; - - EXPECT_EQ(0, serialize(data.params, param_str)); - - map_str_str_t event_str_map = { { data.source + ":" + data.tag, param_str}}; - - EXPECT_EQ(0, serialize(event_str_map, event_data[EVENT_STR_DATA])); - } - + event_data[EVENT_STR_DATA] = convert_to_json( + data.source + ":" + data.tag, data.params); event_data[EVENT_RUNTIME_ID] = data.rid; event_data[EVENT_SEQUENCE] = data.seq; @@ -226,21 +220,8 @@ void run_pub(void *mock_pub, const string wr_source, internal_events_lst_t &lst) } -void debug_on() -{ - /* compile with -D DEBUG_TEST or add "#define DEBUG_TEST" to include. */ -#ifdef DEBUG_TEST - /* Direct log messages to stdout */ - string dummy, op("STDOUT"); - swss::Logger::swssOutputNotify(dummy, op); - swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG); -#endif -} - TEST(eventd, proxy) { - debug_on(); - printf("Proxy TEST started\n"); bool term_sub = false; bool term_cap = false; @@ -310,12 +291,12 @@ TEST(eventd, proxy) TEST(eventd, capture) { printf("Capture TEST started\n"); - debug_on(); bool term_sub = false; string sub_source; int sub_evts_sz = 0; internal_events_lst_t sub_evts; + stats_collector stats_instance; /* run_pub details */ string wr_source("hello"); @@ -328,6 +309,7 @@ TEST(eventd, capture) /* startup strings; expected list & read list from capture */ event_serialized_lst_t evts_start, evts_expect, evts_read; last_events_t last_evts_exp, last_evts_read; + counters_t overflow, overflow_exp = 0; void *zctx = zmq_ctx_new(); EXPECT_TRUE(NULL != zctx); @@ -343,7 +325,7 @@ TEST(eventd, capture) thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ - capture_service *pcap = new capture_service(zctx, cache_max); + capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance); /* Expect START_CAPTURE */ EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE)); @@ -352,7 +334,7 @@ TEST(eventd, capture) EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); EXPECT_TRUE(init_cache > 1); - EXPECT_TRUE((cache_max+3) < ARRAY_SIZE(ldata)); + EXPECT_TRUE((cache_max+3) < (int)ARRAY_SIZE(ldata)); /* Collect few serailized strings of events for startup cache */ for(int i=0; i < init_cache; ++i) { @@ -369,7 +351,7 @@ TEST(eventd, capture) * Hence i=1, when first init_cache events are already * in crash. 
*/ - for(int i=1; i < ARRAY_SIZE(ldata); ++i) { + for(int i=1; i < (int)ARRAY_SIZE(ldata); ++i) { internal_event_t ev(create_ev(ldata[i])); string evt_str; @@ -385,8 +367,10 @@ TEST(eventd, capture) } else { /* collect last entries for overflow */ last_evts_exp[ldata[i].rid] = evt_str; + overflow_exp++; } } + overflow_exp -= (int)last_evts_exp.size(); EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start)); @@ -406,13 +390,14 @@ TEST(eventd, capture) term_sub = true; /* Read the cache */ - EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow)); #ifdef DEBUG_TEST if ((evts_read.size() != evts_expect.size()) || (last_evts_read.size() != last_evts_exp.size())) { printf("size: sub_evts_sz=%d sub_evts=%d\n", sub_evts_sz, (int)sub_evts.size()); printf("init_cache=%d cache_max=%d\n", init_cache, cache_max); + printf("overflow=%ul overflow_exp=%ul\n", overflow, overflow_exp); printf("evts_start=%d evts_expect=%d evts_read=%d\n", (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size()); printf("last_evts_exp=%d last_evts_read=%d\n", (int)last_evts_exp.size(), @@ -424,6 +409,7 @@ TEST(eventd, capture) EXPECT_EQ(evts_read, evts_expect); EXPECT_EQ(last_evts_read.size(), last_evts_exp.size()); EXPECT_EQ(last_evts_read, last_evts_exp); + EXPECT_EQ(overflow, overflow_exp); delete pxy; pxy = NULL; @@ -445,7 +431,6 @@ TEST(eventd, capture) TEST(eventd, captureCacheMax) { printf("Capture TEST with matchinhg cache-max started\n"); - debug_on(); /* * Need to run subscriber; Else publisher would skip publishing @@ -455,6 +440,7 @@ TEST(eventd, captureCacheMax) string sub_source; int sub_evts_sz = 0; internal_events_lst_t sub_evts; + stats_collector stats_instance; /* run_pub details */ string wr_source("hello"); @@ -467,6 +453,7 @@ TEST(eventd, captureCacheMax) /* startup strings; expected list & read list from capture */ event_serialized_lst_t evts_start, evts_expect, evts_read; last_events_t last_evts_read; + counters_t overflow; void *zctx = zmq_ctx_new(); EXPECT_TRUE(NULL != zctx); @@ -482,7 +469,7 @@ TEST(eventd, captureCacheMax) thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz)); /* Create capture service */ - capture_service *pcap = new capture_service(zctx, cache_max); + capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance); /* Expect START_CAPTURE */ EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE)); @@ -502,7 +489,7 @@ TEST(eventd, captureCacheMax) * Collect events to publish for capture to cache * re-publishing some events sent in cache. 
*/ - for(int i=1; i < ARRAY_SIZE(ldata); ++i) { + for(int i=1; i < (int)ARRAY_SIZE(ldata); ++i) { internal_event_t ev(create_ev(ldata[i])); string evt_str; @@ -535,7 +522,7 @@ TEST(eventd, captureCacheMax) term_sub = true; /* Read the cache */ - EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read)); + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow)); #ifdef DEBUG_TEST if ((evts_read.size() != evts_expect.size()) || @@ -545,11 +532,13 @@ TEST(eventd, captureCacheMax) printf("evts_start=%d evts_expect=%d evts_read=%d\n", (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size()); printf("last_evts_read=%d\n", (int)last_evts_read.size()); + printf("overflow=%ul overflow_exp=%ul\n", overflow, overflow_exp); } #endif EXPECT_EQ(evts_read, evts_expect); EXPECT_TRUE(last_evts_read.empty()); + EXPECT_EQ(overflow, 0); delete pxy; pxy = NULL; @@ -568,7 +557,6 @@ TEST(eventd, captureCacheMax) printf("Capture TEST with matchinhg cache-max completed\n"); } - TEST(eventd, service) { /* @@ -580,7 +568,6 @@ TEST(eventd, service) * TEST(eventd, capture) has already tested caching. */ printf("Service TEST started\n"); - debug_on(); /* startup strings; expected list & read list from capture */ event_service service; @@ -594,6 +581,11 @@ TEST(eventd, service) * It uses its own zmq context * It starts to capture too. */ + + if (!g_is_redis_available) { + set_unit_testing(true); + } + thread thread_service(&run_eventd_service); /* Need client side service to interact with server side */ @@ -603,6 +595,7 @@ TEST(eventd, service) /* eventd_service starts cache too; Test this caching */ /* Init pub connection */ void *mock_pub = init_pub(zctx); + EXPECT_TRUE(NULL != mock_pub); internal_events_lst_t wr_evts; int wr_sz = 2; @@ -640,6 +633,7 @@ TEST(eventd, service) /* Test normal cache op; init, start & stop via event_service APIs */ int init_cache = 4; /* provided along with start capture */ event_serialized_lst_t evts_start, evts_read; + vector evts_start_int; EXPECT_TRUE(init_cache > 1); @@ -649,6 +643,7 @@ TEST(eventd, service) string evt_str; serialize(ev, evt_str); evts_start.push_back(evt_str); + evts_start_int.push_back(ev); } @@ -663,7 +658,32 @@ TEST(eventd, service) /* Read the cache */ EXPECT_EQ(0, service.cache_read(evts_read)); - EXPECT_EQ(evts_read, evts_start); + if (evts_read != evts_start) { + vector evts_read_int; + + for (event_serialized_lst_t::const_iterator itc = evts_read.begin(); + itc != evts_read.end(); ++itc) { + internal_event_t event; + + if (deserialize(*itc, event) == 0) { + evts_read_int.push_back(event); + } + } + EXPECT_EQ(evts_read_int, evts_start_int); + } + } + + { + string set_opt_bad("{\"HEARTBEAT_INTERVAL\": 2000, \"OFFLINE_CACHE_SIZE\": 500}"); + string set_opt_good("{\"HEARTBEAT_INTERVAL\":5}"); + char buff[100]; + buff[0] = 0; + + EXPECT_EQ(-1, service.global_options_set(set_opt_bad.c_str())); + EXPECT_EQ(0, service.global_options_set(set_opt_good.c_str())); + EXPECT_LT(0, service.global_options_get(buff, sizeof(buff))); + + EXPECT_EQ(set_opt_good, string(buff)); } EXPECT_EQ(0, service.send_recv(EVENT_EXIT)); @@ -676,3 +696,220 @@ TEST(eventd, service) printf("Service TEST completed\n"); } + +void +wait_for_heartbeat(stats_collector &stats_instance, long unsigned int cnt, + int wait_ms = 3000) +{ + int diff = 0; + + auto st = duration_cast(system_clock::now().time_since_epoch()).count(); + while (stats_instance.heartbeats_published() == cnt) { + auto en = duration_cast(system_clock::now().time_since_epoch()).count(); + diff = en - st; + 
if (diff > wait_ms) { + EXPECT_LE(diff, wait_ms); + EXPECT_EQ(cnt, stats_instance.heartbeats_published()); + break; + } + else { + stringstream ss; + ss << (en -st); + } + this_thread::sleep_for(chrono::milliseconds(300)); + } +} + +TEST(eventd, heartbeat) +{ + printf("heartbeat TEST started\n"); + + int rc; + long unsigned int cnt; + stats_collector stats_instance; + + if (!g_is_redis_available) { + set_unit_testing(true); + } + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + rc = stats_instance.start(); + EXPECT_EQ(rc, 0); + + /* Wait for any non-zero heartbeat */ + wait_for_heartbeat(stats_instance, 0); + + /* Pause heartbeat */ + stats_instance.heartbeat_ctrl(true); + + /* Sleep to ensure the other thread noticed the pause request. */ + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Get current count */ + cnt = stats_instance.heartbeats_published(); + + /* Wait for 3 seconds with no new neartbeat */ + this_thread::sleep_for(chrono::seconds(3)); + + EXPECT_EQ(stats_instance.heartbeats_published(), cnt); + + /* Set interval as 1 second */ + stats_instance.set_heartbeat_interval(1); + + /* Turn on heartbeat */ + stats_instance.heartbeat_ctrl(); + + /* Wait for heartbeat count to change from last count */ + wait_for_heartbeat(stats_instance, cnt, 2000); + + stats_instance.stop(); + + delete pxy; + + zmq_ctx_term(zctx); + + printf("heartbeat TEST completed\n"); +} + + +TEST(eventd, testDB) +{ + printf("DB TEST started\n"); + + /* consts used */ + const int pub_count = 7; + const int cache_max = 3; + + stats_collector stats_instance; + event_handle_t pub_handle; + event_serialized_lst_t evts_read; + last_events_t last_evts_read; + counters_t overflow; + string tag; + + if (!g_is_redis_available) { + printf("redis not available; Hence DB TEST skipped\n"); + return; + } + + EXPECT_LT(cache_max, pub_count); + DBConnector db("COUNTERS_DB", 0, true); + + + /* Not testing heartbeat; Hence set high val as 10 seconds */ + stats_instance.set_heartbeat_interval(10000); + + /* Start instance to capture published count & as well writes to DB */ + EXPECT_EQ(0, stats_instance.start()); + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + /* Run proxy to enable receive as capture test needs to receive */ + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + /* Create capture service */ + capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance); + + /* Initialize the capture */ + EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); + + /* Kick off capture */ + EXPECT_EQ(0, pcap->set_control(START_CAPTURE)); + + pub_handle = events_init_publisher("test_db"); + + for(int i=0; i < pub_count; ++i) { + tag = string("test_db_tag_") + to_string(i); + event_publish(pub_handle, tag); + } + + /* Pause to ensure all publisghed events did reach capture service */ + this_thread::sleep_for(chrono::milliseconds(200)); + + EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); + + /* Read the cache */ + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow)); + + /* + * Sent pub_count messages of different tags. + * Upon cache max, only event per sender/runtime-id is saved. Hence + * expected last_evts_read is one. 
+ * expected overflow = pub_count - cache_max - 1 + */ + + EXPECT_EQ(cache_max, (int)evts_read.size()); + EXPECT_EQ(1, (int)last_evts_read.size()); + EXPECT_EQ((pub_count - cache_max - 1), overflow); + + EXPECT_EQ(pub_count, stats_instance.read_counter( + INDEX_COUNTERS_EVENTS_PUBLISHED)); + EXPECT_EQ((pub_count - cache_max - 1), stats_instance.read_counter( + INDEX_COUNTERS_EVENTS_MISSED_CACHE)); + + events_deinit_publisher(pub_handle); + + for (int i=0; i < COUNTERS_EVENTS_TOTAL; ++i) { + string key = string("COUNTERS_EVENTS:") + counter_keys[i]; + unordered_map m; + bool key_found = false, val_found=false, val_match=false; + + if (db.exists(key)) { + try { + m = db.hgetall(key); + unordered_map::const_iterator itc = + m.find(string(EVENTS_STATS_FIELD_NAME)); + if (itc != m.end()) { + int expect = (counter_keys[i] == string(COUNTERS_EVENTS_PUBLISHED) ? + pub_count : (pub_count - cache_max - 1)); + val_match = (expect == stoi(itc->second) ? true : false); + val_found = true; + } + } + catch (exception &e) + { + printf("Failed to get key=(%s) err=(%s)", key.c_str(), e.what()); + EXPECT_TRUE(false); + } + key_found = true; + } + + if (!val_match) { + printf("key=%s key_found=%d val_found=%d fields=%d", + key.c_str(), key_found, val_found, (int)m.size()); + + printf("hgetall BEGIN key=%s", key.c_str()); + for(unordered_map::const_iterator itc = m.begin(); + itc != m.end(); ++itc) { + printf("val[%s] = (%s)", itc->first.c_str(), itc->second.c_str()); + } + printf("hgetall END\n"); + EXPECT_TRUE(false); + } + } + + stats_instance.stop(); + + delete pxy; + delete pcap; + + zmq_ctx_term(zctx); + + printf("DB TEST completed\n"); +} + + +// TODO -- Add unit tests for stats diff --git a/src/sonic-eventd/tests/main.cpp b/src/sonic-eventd/tests/main.cpp index f803fbc39d5a..2858e13f8d67 100644 --- a/src/sonic-eventd/tests/main.cpp +++ b/src/sonic-eventd/tests/main.cpp @@ -1,10 +1,97 @@ #include "gtest/gtest.h" +#include "dbconnector.h" #include using namespace std; +using namespace swss; + +string existing_file = "./tests/redis_multi_db_ut_config/database_config.json"; +string nonexisting_file = "./tests/redis_multi_db_ut_config/database_config_nonexisting.json"; +string global_existing_file = "./tests/redis_multi_db_ut_config/database_global.json"; + +#define TEST_DB "APPL_DB" +#define TEST_NAMESPACE "asic0" +#define INVALID_NAMESPACE "invalid" + +bool g_is_redis_available = false; + +class SwsscommonEnvironment : public ::testing::Environment { +public: + // Override this to define how to set up the environment. 
+ void SetUp() override { + // by default , init should be false + cout<<"Default : isInit = "<= 0, "Missed count uninitialized"); + ASSERT(evt.publish_epoch_ms > 0, "publish_epoch_ms uninitialized"); total_missed += evt.missed_cnt; From edfde1e0a657930aac128ac08d38fa5d86fda1a6 Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Wed, 17 Aug 2022 16:26:46 -0700 Subject: [PATCH 64/66] Add python tool to publish events for testing (#7) * Add python tool to publish events for testing * Copy events publish tool to /usr/sbin * Add functionality to read from file * Add ability to read tag from file and take source as param * Add changes per peer review Co-authored-by: Ubuntu --- src/sonic-eventd/Makefile | 2 + src/sonic-eventd/tools/events_publish_tool.py | 98 +++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 src/sonic-eventd/tools/events_publish_tool.py diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile index 6493a732312a..00d3199a65bc 100644 --- a/src/sonic-eventd/Makefile +++ b/src/sonic-eventd/Makefile @@ -2,6 +2,7 @@ RM := rm -rf EVENTD_TARGET := eventd EVENTD_TEST := tests/tests EVENTD_TOOL := tools/events_tool +EVENTD_PUBLISH_TOOL := tools/events_publish_tool.py RSYSLOG-PLUGIN_TARGET := rsyslog_plugin/rsyslog_plugin RSYSLOG-PLUGIN_TEST := rsyslog_plugin_tests/tests CP := cp @@ -70,6 +71,7 @@ install: $(MKDIR) -p $(DESTDIR)/usr/sbin $(CP) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin $(CP) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_PUBLISH_TOOL) $(DESTDIR)/usr/sbin deinstall: $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) diff --git a/src/sonic-eventd/tools/events_publish_tool.py b/src/sonic-eventd/tools/events_publish_tool.py new file mode 100644 index 000000000000..f629e02f850b --- /dev/null +++ b/src/sonic-eventd/tools/events_publish_tool.py @@ -0,0 +1,98 @@ +from swsscommon.swsscommon import events_init_publisher, events_deinit_publisher, event_publish, FieldValueMap +import time +import sys +import ipaddress +import random +import argparse +import json +import re +import logging + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers = [ + logging.FileHandler("debug.log"), + logging.StreamHandler(sys.stdout) + ] +) + +def getTag(sourceTag): + try: + return sourceTag.split(":", 1)[1] + except Exception as ex: + logging.info("Unable to find : in :tag\n") + return sourceTag + +def getFVMFromParams(params): + param_dict = FieldValueMap() + for key, value in params.items(): + key = str(key) + value = str(value) + param_dict[key] = value + return param_dict + +def publishEvents(line, publisher_handle): + try: + json_dict = json.loads(line) + except Exception as ex: + logging.error("JSON string not able to be parsed\n") + return + if not json_dict or len(json_dict) != 1: + logging.error("JSON string not able to be parsed\n") + return + sourceTag = list(json_dict)[0] + params = list(json_dict.values())[0] + tag = getTag(sourceTag) + param_dict = getFVMFromParams(params) + if param_dict: + event_publish(publisher_handle, tag, param_dict) + +def publishEventsFromFile(publisher_handle, infile, count, pause): + try: + with open(infile, 'r') as f: + for line in f.readlines(): + line.rstrip() + publishEvents(line, publisher_handle) + time.sleep(pause) + except Exception as ex: + logging.error("Unable to open file from given path or has incorrect json format, gives exception {}\n".format(ex)) + logging.info("Switching to default bgp state publish events\n") + 
publishBGPEvents(publisher_handle, count, pause) + +def publishBGPEvents(publisher_handle, count, pause): + ip_addresses = [] + param_dict = FieldValueMap() + + for _ in range(count): + ip = str(ipaddress.IPv4Address(random.randint(0, 2 ** 32))) + ip_addresses.append(ip) + + # publish down events + for ip in ip_addresses: + param_dict["ip"] = ip + param_dict["status"] = "down" + event_publish(publisher_handle, "bgp-state", param_dict) + time.sleep(pause) + + # publish up events + for ip in ip_addresses: + param_dict["ip"] = ip + event_publish(publisher_handle, "bgp-state", param_dict) + time.sleep(pause) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-s", "--source", nargs='?', const='test-event-source', default='test-event-source', help="Source of event, default us test-event-source") + parser.add_argument("-f", "--file", nargs='?', const='', default='', help="File containing json event strings, must be in format \'{\":foo\": {\"aaa\": \"AAA\", \"bbb\": \"BBB\"}}\'") + parser.add_argument("-c", "--count", nargs='?', type=int, const=10, default=10, help="Count of default bgp events to be generated") + parser.add_argument("-p", "--pause", nargs='?', type=float, const=0.0, default=0.0, help="Pause time wanted between each event, default is 0") + args = parser.parse_args() + publisher_handle = events_init_publisher(args.source) + if args.file == '': + publishBGPEvents(publisher_handle, args.count, args.pause) + else: + publishEventsFromFile(publisher_handle, args.file, args.count, args.pause) + +if __name__ == "__main__": + main() From 2059906b7506a538ba302ed410b8d4d32d3d3705 Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Wed, 24 Aug 2022 23:06:13 -0700 Subject: [PATCH 65/66] Add volume test (#10) * Add volume test * Update volume test with comments --- src/sonic-eventd/tools/events_volume_test.py | 68 ++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 src/sonic-eventd/tools/events_volume_test.py diff --git a/src/sonic-eventd/tools/events_volume_test.py b/src/sonic-eventd/tools/events_volume_test.py new file mode 100644 index 000000000000..73143d483cd8 --- /dev/null +++ b/src/sonic-eventd/tools/events_volume_test.py @@ -0,0 +1,68 @@ +import sys +import subprocess +import time +import logging +import argparse + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers = [ + logging.FileHandler("debug.log"), + logging.StreamHandler(sys.stdout) + ] +) + +def read_events_from_file(file, count): + logging.info("Reading from file generated by events_tool") + lines = 0 + with open(file, 'r') as infile: + lines = infile.readlines() + logging.info("Should receive {} events and got {} events\n".format(count, len(lines))) + assert len(lines) == count + +def start_tool(file): + logging.info("Starting events_tool\n") + proc = subprocess.Popen(["./events_tool", "-r", "-o", file]) + return proc + +def run_test(process, file, count, duplicate): + # log messages to see if events have been received + tool_proc = start_tool(file) + + time.sleep(2) # buffer for events_tool to startup + logging.info("Generating logger messages\n") + for i in range(count): + line = "" + state = "up" + if duplicate: + line = "{} test message testmessage state up".format(process) + else: + if i % 2 != 1: + state = "down" + line = "{} test message testmessage{} state {}".format(process, i, state) + command = "logger -p local0.notice -t {}".format(line) + subprocess.run(command, 
From 2059906b7506a538ba302ed410b8d4d32d3d3705 Mon Sep 17 00:00:00 2001
From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com>
Date: Wed, 24 Aug 2022 23:06:13 -0700
Subject: [PATCH 65/66] Add volume test (#10)

* Add volume test
* Update volume test with comments
---
 src/sonic-eventd/tools/events_volume_test.py | 68 ++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 src/sonic-eventd/tools/events_volume_test.py

diff --git a/src/sonic-eventd/tools/events_volume_test.py b/src/sonic-eventd/tools/events_volume_test.py
new file mode 100644
index 000000000000..73143d483cd8
--- /dev/null
+++ b/src/sonic-eventd/tools/events_volume_test.py
@@ -0,0 +1,68 @@
+import sys
+import subprocess
+import time
+import logging
+import argparse
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(message)s",
+    handlers = [
+        logging.FileHandler("debug.log"),
+        logging.StreamHandler(sys.stdout)
+    ]
+)
+
+def read_events_from_file(file, count):
+    logging.info("Reading from file generated by events_tool")
+    lines = 0
+    with open(file, 'r') as infile:
+        lines = infile.readlines()
+    logging.info("Should receive {} events and got {} events\n".format(count, len(lines)))
+    assert len(lines) == count
+
+def start_tool(file):
+    logging.info("Starting events_tool\n")
+    proc = subprocess.Popen(["./events_tool", "-r", "-o", file])
+    return proc
+
+def run_test(process, file, count, duplicate):
+    # log messages to see if events have been received
+    tool_proc = start_tool(file)
+
+    time.sleep(2) # buffer for events_tool to startup
+    logging.info("Generating logger messages\n")
+    for i in range(count):
+        line = ""
+        state = "up"
+        if duplicate:
+            line = "{} test message testmessage state up".format(process)
+        else:
+            if i % 2 != 1:
+                state = "down"
+            line = "{} test message testmessage{} state {}".format(process, i, state)
+        command = "logger -p local0.notice -t {}".format(line)
+        subprocess.run(command, shell=True, stdout=subprocess.PIPE)
+
+    time.sleep(2) # some buffer for all events to be published to file
+    read_events_from_file(file, count)
+    tool_proc.terminate()
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-p", "--process", nargs='?', const='', default='', help="Process that is spitting out log")
+    parser.add_argument("-f", "--file", nargs='?', const='', default='', help="File used by events_tool to read events from")
+    parser.add_argument("-c", "--count", type=int, nargs='?', const=1000, default=1000, help="Count of times log message needs to be published down/up, default is 1000")
+    args = parser.parse_args()
+    if(args.process == '' or args.file == ''):
+        logging.error("Invalid process or logfile\n")
+        return
+    logging.info("Starting volume test\n")
+    logging.info("Generating {} unique messages for rsyslog plugin\n".format(args.count))
+    run_test(args.process, args.file, args.count, False)
+    time.sleep(2)
+    logging.info("Restarting volume test but for duplicate log messages\n")
+    run_test(args.process, args.file, args.count, True)
+
+if __name__ == "__main__":
+    main()
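A possible invocation of the volume test above, with placeholder names: it assumes an events_tool binary is present in the current working directory (the script runs "./events_tool -r -o <file>"), that the rsyslog plugin on the box is configured with a regex matching the generated "test message" lines for the chosen process tag, and that the output path is writable.

    $ python3 events_volume_test.py -p test_process -f /tmp/events_volume.out -c 1000

run_test() is executed twice, once with unique messages alternating state down/up and once with duplicate messages, and each pass asserts that events_tool recorded exactly the requested count of events.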
From 2d09ad4c4c5d2829d967caadec69fd249e813049 Mon Sep 17 00:00:00 2001
From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com>
Date: Thu, 25 Aug 2022 10:47:21 -0700
Subject: [PATCH 66/66] Fix pr comments (#11)

* Fix PR comments
* Update with PR review comments
* Fix LGTM warnings of publish tool
* Fix bug in unit test
* Change to ip to match yang model
* Add changes per peer review
---
 dockers/docker-fpm-frr/bgp_regex.json         |   2 +-
 .../rsyslog_plugin/rsyslog_plugin.cpp         |  50 ++++++--
 .../rsyslog_plugin/syslog_parser.cpp          |  48 ++++----
 .../rsyslog_plugin/syslog_parser.h            |  16 ++-
 .../rsyslog_plugin_ut.cpp                     | 108 ++++++++++--------
 src/sonic-eventd/tools/events_publish_tool.py |   3 +-
 6 files changed, 139 insertions(+), 88 deletions(-)

diff --git a/dockers/docker-fpm-frr/bgp_regex.json b/dockers/docker-fpm-frr/bgp_regex.json
index aa27824386d1..898b5b060ebe 100644
--- a/dockers/docker-fpm-frr/bgp_regex.json
+++ b/dockers/docker-fpm-frr/bgp_regex.json
@@ -2,7 +2,7 @@
     {
         "tag": "bgp-state",
        "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .(up|down).",
-        "params": [ "peer_ip", "status" ]
+        "params": [ "ip", "status" ]
     }
 ]
diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp
index d40f5f00f3a8..3786c5f0fea9 100644
--- a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp
+++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp
@@ -25,50 +25,80 @@ bool RsyslogPlugin::onMessage(string msg, lua_State* luaState) {
     }
 }
 
+void parseParams(vector<string> params, vector<EventParam>& eventParams) {
+    for(long unsigned int i = 0; i < params.size(); i++) {
+        if(params[i].empty()) {
+            SWSS_LOG_ERROR("Empty param provided in regex file\n");
+            continue;
+        }
+        EventParam ep = EventParam();
+        auto delimPos = params[i].find(':');
+        if(delimPos == string::npos) { // no lua code
+            ep.paramName = params[i];
+        } else {
+            ep.paramName = params[i].substr(0, delimPos);
+            ep.luaCode = params[i].substr(delimPos + 1);
+            if(ep.luaCode.empty()) {
+                SWSS_LOG_ERROR("Lua code missing after :\n");
+            }
+        }
+        eventParams.push_back(ep);
+    }
+}
+
 bool RsyslogPlugin::createRegexList() {
     fstream regexFile;
+    json jsonList = json::array();
     regexFile.open(m_regexPath, ios::in);
     if (!regexFile) {
         SWSS_LOG_ERROR("No such path exists: %s for source %s\n", m_regexPath.c_str(), m_moduleName.c_str());
         return false;
     }
     try {
-        regexFile >> m_parser->m_regexList;
+        regexFile >> jsonList;
     } catch (invalid_argument& iaException) {
         SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", m_regexPath.c_str(), iaException.what());
         return false;
     }
 
     string regexString;
+    string timestampRegex = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*";
     regex expression;
+    vector<RegexStruct> regexList;
 
-    for(long unsigned int i = 0; i < m_parser->m_regexList.size(); i++) {
+    for(long unsigned int i = 0; i < jsonList.size(); i++) {
+        RegexStruct rs = RegexStruct();
+        vector<EventParam> eventParams;
         try {
-            string timestampRegex = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*";
-            string eventRegex = m_parser->m_regexList[i]["regex"];
+            string eventRegex = jsonList[i]["regex"];
             regexString = timestampRegex + eventRegex;
-            string tag = m_parser->m_regexList[i]["tag"];
-            vector<string> params = m_parser->m_regexList[i]["params"];
+            string tag = jsonList[i]["tag"];
+            vector<string> params = jsonList[i]["params"];
             vector<string> timestampParams = { "month", "day", "time" };
             params.insert(params.begin(), timestampParams.begin(), timestampParams.end());
-            m_parser->m_regexList[i]["params"] = params;
             regex expr(regexString);
             expression = expr;
-        } catch (domain_error& deException) {
+            parseParams(params, eventParams);
+            rs.params = eventParams;
+            rs.tag = tag;
+            rs.regexExpression = expression;
+            regexList.push_back(rs);
+        } catch (domain_error& deException) {
            SWSS_LOG_ERROR("Missing required key, throws exception: %s\n", deException.what());
            return false;
        } catch (regex_error& reException) {
            SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", reException.what());
            return false;
        }
-        m_parser->m_expressions.push_back(expression);
     }
 
-    if(m_parser->m_expressions.empty()) {
+    if(regexList.empty()) {
         SWSS_LOG_ERROR("Empty list of regex expressions.\n");
         return false;
     }
+    m_parser->m_regexList = regexList;
+
     regexFile.close();
     return true;
 }
diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp
index 5fe043e76ca2..03e0cdf4c233 100644
--- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp
+++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp
@@ -14,49 +14,45 @@ bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap, lua_State* luaState) {
     for(long unsigned int i = 0; i < m_regexList.size(); i++) {
         smatch matchResults;
-        vector<string> params = m_regexList[i]["params"];
-        if(!regex_search(message, matchResults, m_expressions[i]) || params.size() != matchResults.size() - 1 || matchResults.size() < 4) {
+        if(!regex_search(message, matchResults, m_regexList[i].regexExpression) || m_regexList[i].params.size() != matchResults.size() - 1 || matchResults.size() < 4) {
             continue;
         }
-
+        string formattedTimestamp;
         if(!matchResults[1].str().empty() && !matchResults[2].str().empty() && !matchResults[3].str().empty()) { // found timestamp components
-            string formattedTimestamp = m_timestampFormatter->changeTimestampFormat({ matchResults[1].str(), matchResults[2].str(), matchResults[3].str() });
-            if(!formattedTimestamp.empty()) {
-                paramMap["timestamp"] = formattedTimestamp;
-            } else {
-                SWSS_LOG_ERROR("Timestamp is invalid and is not able to be formatted");
-            }
+            formattedTimestamp = m_timestampFormatter->changeTimestampFormat({ matchResults[1].str(), matchResults[2].str(), matchResults[3].str() });
         }
+        if(!formattedTimestamp.empty()) {
+            paramMap["timestamp"] = formattedTimestamp;
+        } else {
+            SWSS_LOG_ERROR("Timestamp is invalid and is not able to be formatted");
+        }
+
         // found matching regex
-        eventTag = m_regexList[i]["tag"];
+        eventTag = m_regexList[i].tag;
         // check params for lua code
-        for(long unsigned int j = 3; j < params.size(); j++) {
-            auto delimPos = params[j].find(':');
+        for(long unsigned int j = 3; j < m_regexList[i].params.size(); j++) {
             string resultValue = matchResults[j + 1].str();
-            if(delimPos == string::npos) { // no lua code
-                paramMap[params[j]] = resultValue;
+            string paramName = m_regexList[i].params[j].paramName;
+            const char* luaCode = m_regexList[i].params[j].luaCode.c_str();
+
+            if(luaCode == NULL || *luaCode == 0) {
+                SWSS_LOG_INFO("Invalid lua code, empty or missing");
+                paramMap[paramName] = resultValue;
                 continue;
             }
-            // have to execute lua script
-            string param = params[j].substr(0, delimPos);
-            string luaString = params[j].substr(delimPos + 1);
-            if(luaString.empty()) { // empty lua code
-                SWSS_LOG_INFO("Lua code missing after :, skipping operation");
-                paramMap[param] = resultValue;
-                continue;
-            }
-            const char* luaCode = luaString.c_str();
+
+            // execute lua code
             lua_pushstring(luaState, resultValue.c_str());
             lua_setglobal(luaState, "arg");
             if(luaL_dostring(luaState, luaCode) == 0) {
                 lua_pop(luaState, lua_gettop(luaState));
-            } else {
+            } else { // error in lua code
                 SWSS_LOG_ERROR("Invalid lua code, unable to do operation.\n");
-                paramMap[param] = resultValue;
+                paramMap[paramName] = resultValue;
                 continue;
             }
             lua_getglobal(luaState, "ret");
-            paramMap[param] = lua_tostring(luaState, -1);
+            paramMap[paramName] = lua_tostring(luaState, -1);
             lua_pop(luaState, 1);
         }
         return true;
diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h
index 5ae2a04b7567..6293eb3c4a34 100644
--- a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h
+++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h
@@ -18,6 +18,17 @@ extern "C"
 using namespace std;
 using json = nlohmann::json;
 
+struct EventParam {
+    string paramName;
+    string luaCode;
+};
+
+struct RegexStruct {
+    regex regexExpression;
+    vector<EventParam> params;
+    string tag;
+};
+
 /**
 * Syslog Parser is responsible for parsing log messages fed by rsyslog.d and returns
 * matched result to rsyslog_plugin to use with events publish API
@@ -25,10 +36,9 @@ using json = nlohmann::json;
  */
 class SyslogParser {
-public: 
+public:
     unique_ptr<TimestampFormatter> m_timestampFormatter;
-    vector<regex> m_expressions;
-    json m_regexList = json::array();
+    vector<RegexStruct> m_regexList;
 
     bool parseMessage(string message, string& tag, event_params_t& paramDict, lua_State* luaState);
     SyslogParser();
 };
diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp
index 82369039d823..be5a19ad5a5b 100644
--- a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp
+++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp
@@ -19,17 +19,30 @@ using namespace std;
 using namespace swss;
 using json = nlohmann::json;
 
+vector<EventParam> createEventParams(vector<string> params, vector<string> luaCodes) {
+    vector<EventParam> eventParams;
+    for(long unsigned int i = 0; i < params.size(); i++) {
+        EventParam ep = EventParam();
+        ep.paramName = params[i];
+        ep.luaCode = luaCodes[i];
+        eventParams.push_back(ep);
+    }
+    return eventParams;
+}
+
 TEST(syslog_parser, matching_regex) {
     json jList = json::array();
-    vector<regex> testExpressions;
+    vector<RegexStruct> regexList;
     string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*) even_more_data (.*)";
-    json jTest;
-    jTest["tag"] = "test_tag";
-    jTest["regex"] = regexString;
-    jTest["params"] = { "month", "day", "time", "message", "other_data", "even_more_data" };
-    jList.push_back(jTest);
+    vector<string> params = { "month", "day", "time", "message", "other_data", "even_more_data" };
+    vector<string> luaCodes = { "", "", "", "", "", "" };
     regex expression(regexString);
-    testExpressions.push_back(expression);
+
+    RegexStruct rs = RegexStruct();
+    rs.tag = "test_tag";
+    rs.regexExpression = expression;
+    rs.params = createEventParams(params, luaCodes);
+    regexList.push_back(rs);
 
     string tag;
     event_params_t paramDict;
@@ -40,8 +53,7 @@ TEST(syslog_parser, matching_regex) {
     expectedDict["even_more_data"] = "test_data";
 
     unique_ptr<SyslogParser> parser(new SyslogParser());
-    parser->m_expressions = testExpressions;
-    parser->m_regexList = jList;
+    parser->m_regexList = regexList;
 
     lua_State* luaState = luaL_newstate();
     luaL_openlibs(luaState);
@@ -55,15 +67,17 @@ TEST(syslog_parser, matching_regex) {
 
 TEST(syslog_parser, matching_regex_timestamp) {
     json jList = json::array();
-    vector<regex> testExpressions;
+    vector<RegexStruct> regexList;
     string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*)";
-    json jTest;
-    jTest["tag"] = "test_tag";
-    jTest["regex"] = regexString;
-    jTest["params"] = { "month", "day", "time", "message", "other_data" };
-    jList.push_back(jTest);
+    vector<string> params = { "month", "day", "time", "message", "other_data" };
+    vector<string> luaCodes = { "", "", "", "", "" };
     regex expression(regexString);
-    testExpressions.push_back(expression);
+
+    RegexStruct rs = RegexStruct();
+    rs.tag = "test_tag";
+    rs.regexExpression = expression;
+    rs.params = createEventParams(params, luaCodes);
+    regexList.push_back(rs);
 
     string tag;
     event_params_t paramDict;
@@ -74,8 +88,7 @@ TEST(syslog_parser, matching_regex_timestamp) {
     expectedDict["timestamp"] = "2022-07-21T02:10:00.000000Z";
 
     unique_ptr<SyslogParser> parser(new SyslogParser());
-    parser->m_expressions = testExpressions;
-    parser->m_regexList = jList;
+    parser->m_regexList = regexList;
 
     lua_State* luaState = luaL_newstate();
     luaL_openlibs(luaState);
@@ -89,22 +102,23 @@
 
 TEST(syslog_parser, no_matching_regex) {
     json jList = json::array();
-    vector<regex> testExpressions;
-    string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\s*no match";
-    json jTest;
-    jTest["tag"] = "test_tag";
-    jTest["regex"] = regexString;
-    jTest["params"] = { "month", "day", "time" };
-    jList.push_back(jTest);
+    vector<RegexStruct> regexList;
+    string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*no match";
+    vector<string> params = { "month", "day", "time" };
+    vector<string> luaCodes = { "", "", "" };
     regex expression(regexString);
+
+    RegexStruct rs = RegexStruct();
+    rs.tag = "test_tag";
+    rs.regexExpression = expression;
+    rs.params = createEventParams(params, luaCodes);
+    regexList.push_back(rs);
 
     string tag;
     event_params_t paramDict;
 
     unique_ptr<SyslogParser> parser(new SyslogParser());
-    parser->m_expressions = testExpressions;
-    parser->m_regexList = jList;
+    parser->m_regexList = regexList;
 
     lua_State* luaState = luaL_newstate();
     luaL_openlibs(luaState);
@@ -116,15 +130,17 @@
 
 TEST(syslog_parser, lua_code_valid_1) {
     json jList = json::array();
-    vector<regex> testExpressions;
+    vector<RegexStruct> regexList;
     string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*";
-    json jTest;
-    jTest["tag"] = "test_tag";
-    jTest["regex"] = regexString;
-    jTest["params"] = { "month", "day", "time", "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" };
-    jList.push_back(jTest);
+    vector<string> params = { "month", "day", "time", "is-sent", "ip", "major-code", "minor-code" };
+    vector<string> luaCodes = { "", "", "", "ret=tostring(arg==\"sent\")", "", "", "" };
     regex expression(regexString);
-    testExpressions.push_back(expression);
+
+    RegexStruct rs = RegexStruct();
+    rs.tag = "test_tag";
+    rs.regexExpression = expression;
+    rs.params = createEventParams(params, luaCodes);
+    regexList.push_back(rs);
 
     string tag;
     event_params_t paramDict;
@@ -136,8 +152,7 @@ TEST(syslog_parser, lua_code_valid_1) {
     expectedDict["minor-code"] = "2";
 
     unique_ptr<SyslogParser> parser(new SyslogParser());
-    parser->m_expressions = testExpressions;
-    parser->m_regexList = jList;
+    parser->m_regexList = regexList;
 
     lua_State* luaState = luaL_newstate();
     luaL_openlibs(luaState);
@@ -151,15 +166,17 @@ TEST(syslog_parser, lua_code_valid_2) {
     json jList = json::array();
-    vector<regex> testExpressions;
+    vector<RegexStruct> regexList;
     string regexString = "([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*";
-    json jTest;
-    jTest["tag"] = "test_tag";
-    jTest["regex"] = regexString;
-    jTest["params"] = { "month", "day", "time", "is-sent:ret=tostring(arg==\"sent\")", "ip", "major-code", "minor-code" };
-    jList.push_back(jTest);
+    vector<string> params = { "month", "day", "time", "is-sent", "ip", "major-code", "minor-code" };
+    vector<string> luaCodes = { "", "", "", "ret=tostring(arg==\"sent\")", "", "", "" };
     regex expression(regexString);
-    testExpressions.push_back(expression);
+
+    RegexStruct rs = RegexStruct();
+    rs.tag = "test_tag";
+    rs.regexExpression = expression;
+    rs.params = createEventParams(params, luaCodes);
+    regexList.push_back(rs);
 
     string tag;
     event_params_t paramDict;
@@ -172,8 +189,7 @@ TEST(syslog_parser, lua_code_valid_2) {
expectedDict["timestamp"] = "2022-12-03T12:36:24.503424Z"; unique_ptr parser(new SyslogParser()); - parser->m_expressions = testExpressions; - parser->m_regexList = jList; + parser->m_regexList = regexList; lua_State* luaState = luaL_newstate(); luaL_openlibs(luaState); diff --git a/src/sonic-eventd/tools/events_publish_tool.py b/src/sonic-eventd/tools/events_publish_tool.py index f629e02f850b..df2cbc8012a1 100644 --- a/src/sonic-eventd/tools/events_publish_tool.py +++ b/src/sonic-eventd/tools/events_publish_tool.py @@ -1,11 +1,10 @@ -from swsscommon.swsscommon import events_init_publisher, events_deinit_publisher, event_publish, FieldValueMap +from swsscommon.swsscommon import events_init_publisher, event_publish, FieldValueMap import time import sys import ipaddress import random import argparse import json -import re import logging logging.basicConfig(