From 1d66a2b3ff9f0948839e5ceaf5aeebce40576783 Mon Sep 17 00:00:00 2001 From: sanmalho-git <66454160+sanmalho-git@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:43:52 -0400 Subject: [PATCH 001/117] Changing ipv6 loopback addresses on the linecards to be on different subnets and adding t2 as a supported topology in veos (#3797) In bgpd.main.conf.j2 in sonic-buildimage (http://github.com/Azure/sonic-buildimage/blob/master/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2#L77), Ipv6 Loopback0 addresses are using a 64 bit mask. In t2 topology, we have 2 linecards, and we were assigning two different IPv6 addresses: - fc00:10::1/128 - fc00:10::2/128 However, in bgpd.main.conf.j2 in sonic-buildimage (http://github.com/Azure/sonic-buildimage/blob/master/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2#L77), ipv6 Loopback0 addresses are using a 64 bit mask. Thus, the route to the Loopback0 address of the remote linecard was being masked by the local Loopback0 address, and not pointing to the inband port. Fix for this is to use different subnets for each linecard. 
So, changing the Loopback0 addresses to: - fc00:10::1/128 - fc00:11::1/128 Also, t2 topology was missing from veos as a supported topology --- ansible/vars/topo_t2-vs.yml | 2 +- ansible/vars/topo_t2.yml | 4 ++-- ansible/veos | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ansible/vars/topo_t2-vs.yml b/ansible/vars/topo_t2-vs.yml index fde1ce3a1b5..acdc630f19b 100644 --- a/ansible/vars/topo_t2-vs.yml +++ b/ansible/vars/topo_t2-vs.yml @@ -31,7 +31,7 @@ topology: - 10.1.0.2/32 ipv6: - FC00:10::1/128 - - FC00:10::2/128 + - FC00:11::1/128 vs_chassis: inband_port: - 30 diff --git a/ansible/vars/topo_t2.yml b/ansible/vars/topo_t2.yml index 5d7c26ddf31..af7291dd855 100644 --- a/ansible/vars/topo_t2.yml +++ b/ansible/vars/topo_t2.yml @@ -326,8 +326,8 @@ topology: - 10.1.0.3/32 ipv6: - FC00:10::1/128 - - FC00:10::2/128 - - FC00:10::3/128 + - FC00:11::1/128 + - FC00:12::1/128 configuration_properties: common: diff --git a/ansible/veos b/ansible/veos index 077bf1c7bbd..7fe24b8f453 100644 --- a/ansible/veos +++ b/ansible/veos @@ -34,6 +34,7 @@ all: - tgen-t0-3 - tgen-t1-3-lag - mgmttor + - t2 children: server_1: server_2: From 212e71ed691b6a4e373f45bd9bdba55686619344 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Wed, 21 Jul 2021 10:51:17 -0700 Subject: [PATCH 002/117] Revert "Revert "Snappi sample test (#3653)" (#3766)" (#3806) This reverts commit 1b53581d1d32239a5b133b8c9000c0911746e698. 
Reverts #3766 since the build is using the new docker --- tests/common/snappi/__init__.py | 1 + tests/common/snappi/common_helpers.py | 656 +++++++++++++++++++++++++ tests/common/snappi/port.py | 95 ++++ tests/common/snappi/qos_fixtures.py | 108 ++++ tests/common/snappi/snappi_fixtures.py | 413 ++++++++++++++++ tests/common/snappi/snappi_helpers.py | 220 +++++++++ tests/snappi/test_snappi.py | 186 +++++++ 7 files changed, 1679 insertions(+) create mode 100644 tests/common/snappi/__init__.py create mode 100644 tests/common/snappi/common_helpers.py create mode 100644 tests/common/snappi/port.py create mode 100644 tests/common/snappi/qos_fixtures.py create mode 100644 tests/common/snappi/snappi_fixtures.py create mode 100644 tests/common/snappi/snappi_helpers.py create mode 100644 tests/snappi/test_snappi.py diff --git a/tests/common/snappi/__init__.py b/tests/common/snappi/__init__.py new file mode 100644 index 00000000000..a2a91b4c03f --- /dev/null +++ b/tests/common/snappi/__init__.py @@ -0,0 +1 @@ +# Place for snappi fixtures. diff --git a/tests/common/snappi/common_helpers.py b/tests/common/snappi/common_helpers.py new file mode 100644 index 00000000000..f8a6f48e078 --- /dev/null +++ b/tests/common/snappi/common_helpers.py @@ -0,0 +1,656 @@ +"""This module contains some auxiliary functions that are required +to support automation activities. These functions are used for various +secondary activities like convert the ansible Unicode STDOUT output +to string, get IP address in a subnet, increment an IP address, get +VLAN subnet etc. + +This file is also a placeholder for auxiliary function that are +required for supporting automation with Snappi devices in future: +like collecting diagnostics, uploading and downloading files +to/from API server, processing the statistics after obtaining them +in .csv format etc. 
+""" + +import ipaddr +from netaddr import IPNetwork +from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice + + +def increment_ip_address(ip, incr=1): + """ + Increment IP address by an integer number. + + Args: + ip (str): IP address in string format. + incr (int): Increment by the specified number. + + Return: + IP address in the argument incremented by the given integer. + """ + ipaddress = ipaddr.IPv4Address(ip) + ipaddress = ipaddress + incr + return_value = ipaddress._string_from_ip_int(ipaddress._ip) + return(return_value) + + +def ansible_stdout_to_str(ansible_stdout): + """ + The stdout of Ansible host is essentially a list of unicode characters. + This function converts it to a string. + + Args: + ansible_stdout: stdout of Ansible + + Returns: + Return a string + """ + result = "" + for x in ansible_stdout: + result += x.encode('UTF8') + return result + + +def get_vlan_subnet(host_ans): + """ + Get VLAN subnet of a T0 device + + Args: + host_ans: Ansible host instance of the device + + Returns: + VLAN subnet, e.g., "192.168.1.1/24" where 192.168.1.1 is gateway + and 24 is prefix length + """ + mg_facts = host_ans.minigraph_facts(host=host_ans.hostname)['ansible_facts'] + mg_vlans = mg_facts['minigraph_vlans'] + + if len(mg_vlans) != 1: + print 'There should be only one Vlan at the DUT' + return None + + mg_vlan_intfs = mg_facts['minigraph_vlan_interfaces'] + prefix_len = mg_vlan_intfs[0]['prefixlen'] + gw_addr = ansible_stdout_to_str(mg_vlan_intfs[0]['addr']) + return gw_addr + '/' + str(prefix_len) + + +def get_egress_lossless_buffer_size(host_ans): + """ + Get egress lossless buffer size of a switch + + Args: + host_ans: Ansible host instance of the device + + Returns: + total switch buffer size in byte (int) + """ + config_facts = host_ans.config_facts(host=host_ans.hostname, + source="running")['ansible_facts'] + + if "BUFFER_POOL" not in config_facts.keys(): + return None + + buffer_pools = config_facts['BUFFER_POOL'] + 
profile_name = 'egress_lossless_pool' + + if profile_name not in buffer_pools.keys(): + return None + + egress_lossless_pool = buffer_pools[profile_name] + return int(egress_lossless_pool['size']) + + +def get_addrs_in_subnet(subnet, number_of_ip): + """ + Get N IP addresses in a subnet. + + Args: + subnet (str): IPv4 subnet, e.g., '192.168.1.1/24' + number_of_ip (int): Number of IP addresses to get + + Return: + Return n IPv4 addresses in this subnet in a list. + """ + ip_addr = subnet.split('/')[0] + ip_addrs = [str(x) for x in list(IPNetwork(subnet))] + ip_addrs.remove(ip_addr) + + """ Try to avoid network and broadcast addresses """ + if len(ip_addrs) >= number_of_ip + 2: + del ip_addrs[0] + del ip_addrs[-1] + + return ip_addrs[:number_of_ip] + + +def get_peer_snappi_chassis(conn_data, dut_hostname): + """ + Get the Snappi chassis connected to the DUT + Note that a DUT can only be connected to a Snappi chassis + + Args: + conn_data (dict): the dictionary returned by conn_graph_fact. 
+ Example format of the conn_data is given below: + + {u'device_conn': {u'sonic-s6100-dut': + {u'Ethernet64': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port1', + u'speed': u'100000'}, + u'Ethernet68': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port2', + u'speed': u'100000'}, + u'Ethernet72': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port3', + u'speed': u'100000'}, + u'Ethernet76': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port4', + u'speed': u'100000'}}}, + u'device_console_info': {u'sonic-s6100-dut': {}}, + u'device_console_link': {u'sonic-s6100-dut': {}}, + u'device_info': {u'sonic-s6100-dut': + {u'HwSku': u'Arista-7060CX-32S-C32', + u'Type': u'DevSonic'}}, + u'device_pdu_info': {u'sonic-s6100-dut': {}}, + u'device_pdu_links': {u'sonic-s6100-dut': {}}, + u'device_port_vlans': {u'sonic-s6100-dut': + {u'Ethernet64': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet68': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet72': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet76': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}}}, + u'device_vlan_list': {u'sonic-s6100-dut': [2, 2, 2, 2]}, + u'device_vlan_map_list': {u'sonic-s6100-dut': {u'19': 2}}, + u'device_vlan_range': {u'sonic-s6100-dut': [u'2']}} + + dut_hostname (str): hostname of the DUT + + Returns: + The name of the peer Snappi chassis or None + """ + + device_conn = conn_data['device_conn'] + if dut_hostname not in device_conn: + return None + + dut_device_conn = device_conn[dut_hostname] + peer_devices = [dut_device_conn[port]['peerdevice'] for port in dut_device_conn] + peer_devices = list(set(peer_devices)) + + if len(peer_devices) == 1: + return peer_devices[0] + else: + return None + + +def get_peer_port(conn_data, dut_hostname, dut_intf): + """ + Get the peer port of the DUT port + + Args: + conn_data (dict): the dictionary returned by conn_graph_fact. 
+ Example format of the conn_data is given below: + + {u'device_conn': {u'sonic-s6100-dut': + {u'Ethernet64': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port1', + u'speed': u'100000'}, + u'Ethernet68': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port2', + u'speed': u'100000'}, + u'Ethernet72': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port3', + u'speed': u'100000'}, + u'Ethernet76': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port4', + u'speed': u'100000'}}}, + u'device_console_info': {u'sonic-s6100-dut': {}}, + u'device_console_link': {u'sonic-s6100-dut': {}}, + u'device_info': {u'sonic-s6100-dut': + {u'HwSku': u'Arista-7060CX-32S-C32', + u'Type': u'DevSonic'}}, + u'device_pdu_info': {u'sonic-s6100-dut': {}}, + u'device_pdu_links': {u'sonic-s6100-dut': {}}, + u'device_port_vlans': {u'sonic-s6100-dut': + {u'Ethernet64': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet68': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet72': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet76': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}}}, + u'device_vlan_list': {u'sonic-s6100-dut': [2, 2, 2, 2]}, + u'device_vlan_map_list': {u'sonic-s6100-dut': {u'19': 2}}, + u'device_vlan_range': {u'sonic-s6100-dut': [u'2']}} + + dut_hostname (str): hostname of the DUT + dut_intf (str): name of DUT interface + + Returns: + The name of the peer port or None + """ + device_conn = conn_data['device_conn'] + if dut_hostname not in device_conn: + return None + + dut_device_conn = device_conn[dut_hostname] + if dut_intf not in dut_device_conn: + return None + + return dut_device_conn[dut_intf]['peerport'] + + +def get_dut_intfs(conn_data, dut_hostname): + """ + Get DUT's interfaces + + Args: + conn_data (dict): the dictionary returned by conn_graph_fact. 
+ Example format of the conn_data is given below: + + {u'device_conn': {u'sonic-s6100-dut': + {u'Ethernet64': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port1', + u'speed': u'100000'}, + u'Ethernet68': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port2', + u'speed': u'100000'}, + u'Ethernet72': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port3', + u'speed': u'100000'}, + u'Ethernet76': {u'peerdevice': u'snappi-sonic', + u'peerport': u'Card4/Port4', + u'speed': u'100000'}}}, + u'device_console_info': {u'sonic-s6100-dut': {}}, + u'device_console_link': {u'sonic-s6100-dut': {}}, + u'device_info': {u'sonic-s6100-dut': + {u'HwSku': u'Arista-7060CX-32S-C32', + u'Type': u'DevSonic'}}, + u'device_pdu_info': {u'sonic-s6100-dut': {}}, + u'device_pdu_links': {u'sonic-s6100-dut': {}}, + u'device_port_vlans': {u'sonic-s6100-dut': + {u'Ethernet64': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet68': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet72': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}, + u'Ethernet76': {u'mode': u'Access', + u'vlanids': u'2', + u'vlanlist': [2]}}}, + u'device_vlan_list': {u'sonic-s6100-dut': [2, 2, 2, 2]}, + u'device_vlan_map_list': {u'sonic-s6100-dut': {u'19': 2}}, + u'device_vlan_range': {u'sonic-s6100-dut': [u'2']}} + + dut_hostname (str): hostname of the DUT + + Returns: + Return the list of interface names + """ + + device_conn = conn_data['device_conn'] + if dut_hostname not in device_conn: + return None + + dut_device_conn = device_conn[dut_hostname] + return list(dut_device_conn.keys()) + + +def pfc_class_enable_vector(prio_list): + """ + Calculate class-enable vector field in PFC PAUSE frames + + Args: + prio_list (list): list of priorities to pause, e.g., [3, 4] + + Returns: + Return class-enable vector + """ + vector = 0 + + for p in prio_list: + vector += (2**p) + + return "{:x}".format(vector) + + +def get_wred_profiles(host_ans): + 
""" + Get all the WRED/ECN profiles of a SONiC switch + + Args: + host_ans: Ansible host instance of the device + + Returns: + WRED/ECN profiles (dictionary) or None. + Example format is given below: + + { + u'AZURE_LOSSLESS': { + u'ecn': u'ecn_all', + u'green_drop_probability': u'5', + u'green_max_threshold': u'2097152', + u'green_min_threshold': u'250000', + u'red_drop_probability': u'5', + u'red_max_threshold': u'2097152', + u'red_min_threshold': u'1048576', + u'wred_green_enable': u'true', + u'wred_red_enable': u'true', + u'wred_yellow_enable': u'true', + u'yellow_drop_probability': u'5', + u'yellow_max_threshold': u'2097152', + u'yellow_min_threshold': u'1048576' + } + } + """ + config_facts = host_ans.config_facts(host=host_ans.hostname, + source="running")['ansible_facts'] + + if "WRED_PROFILE" in config_facts.keys(): + return config_facts['WRED_PROFILE'] + else: + return None + + +def config_wred(host_ans, kmin, kmax, pmax, profile=None): + """ + Config a WRED/ECN profile of a SONiC switch + + Args: + host_ans: Ansible host instance of the device + kmin (int): RED/ECN minimum threshold in bytes + kmax (int): RED/ECN maximum threshold in bytes + pmax (int): RED/ECN maximum marking probability in percentage + profile (str): name of profile to configure (None means any profile) + + Returns: + If configuration succeeds (bool) + """ + + if not isinstance(kmin, int) or \ + not isinstance(kmax, int) or \ + not isinstance(pmax, int): + return False + + if kmin < 0 or kmax < 0 or pmax < 0 or pmax > 100 or kmin > kmax: + return False + + profiles = get_wred_profiles(host_ans) + """ Cannot find any WRED/ECN profiles """ + if profiles is None: + return False + + """ Cannot find the profile to configure at the device """ + if profile is not None and profile not in profiles: + return False + + for p in profiles: + """ This is not the profile to configure """ + if profile is not None and profile != p: + continue + + kmin_old = int(profiles[p]['green_min_threshold']) + 
kmax_old = int(profiles[p]['green_max_threshold']) + + if kmin_old > kmax_old: + return False + + """ Ensure that Kmin is no larger than Kmax during the update """ + if kmin > kmin_old: + host_ans.shell('sudo ecnconfig -p {} -gmax {}'.format(p, kmax)) + host_ans.shell('sudo ecnconfig -p {} -gmin {}'.format(p, kmin)) + + else: + host_ans.shell('sudo ecnconfig -p {} -gmin {}'.format(p, kmin)) + host_ans.shell('sudo ecnconfig -p {} -gmax {}'.format(p, kmax)) + + return True + + +def enable_ecn(host_ans, prio): + """ + Enable ECN marking on a priority + + Args: + host_ans: Ansible host instance of the device + prio (int): priority + + Returns: + N/A + """ + host_ans.shell('sudo ecnconfig -q {} on'.format(prio)) + + +def disable_ecn(host_ans, prio): + """ + Disable ECN marking on a priority + + Args: + host_ans: Ansible host instance of the device + prio (int): priority + + Returns: + N/A + """ + host_ans.shell('sudo ecnconfig -q {} off'.format(prio)) + + +def config_buffer_alpha(host_ans, profile, alpha_log2): + """ + Configure buffer threshold (a.k.a., alpha) + + Args: + host_ans: Ansible host instance of the device + profile (str): buffer profile name + alpha_log2 (int): set threshold to 2^alpha_log2 + + Returns: + N/A + """ + host_ans.shell('sudo mmuconfig -p {} -a {}'.format(profile, alpha_log2)) + + +def config_ingress_lossless_buffer_alpha(host_ans, alpha_log2): + """ + Configure ingress buffer thresholds (a.k.a., alpha) of a device to 2^alpha_log2 + + Args: + host_ans: Ansible host instance of the device + alpha_log2 (int): set threshold to 2^alpha_log2 + + Returns: + If configuration succeeds (bool) + """ + if not isinstance(alpha_log2, int): + return False + + config_facts = host_ans.config_facts(host=host_ans.hostname, + source="running")['ansible_facts'] + + if "BUFFER_PROFILE" not in config_facts.keys(): + return False + + buffer_profiles = config_facts['BUFFER_PROFILE'] + ingress_profiles = [] + for profile in buffer_profiles: + if 
profile.startswith('ingress_lossless') or profile.startswith('pg_lossless'): + ingress_profiles.append(profile) + + for profile in ingress_profiles: + config_buffer_alpha(host_ans=host_ans, profile=profile, alpha_log2=alpha_log2) + + """ Check if configuration succeeds """ + config_facts = host_ans.config_facts(host=host_ans.hostname, + source="running")['ansible_facts'] + + for profile in ingress_profiles: + dynamic_th = config_facts['BUFFER_PROFILE'][profile]['dynamic_th'] + if int(dynamic_th) != alpha_log2: + return False + + return True + + +def get_pfcwd_config_attr(host_ans, config_scope, attr): + """ + Get PFC watchdog configuration attribute + + Args: + host_ans: Ansible host instance of the device + config_scope (str): 'GLOBAL' or interface name + attr (str): config attribute name, e.g., 'detection_time' + + Returns: + config attribute (str) or None + """ + config_facts = host_ans.config_facts(host=host_ans.hostname, + source="running")['ansible_facts'] + + if 'PFC_WD' not in config_facts.keys(): + return None + + pfcwd_config = config_facts['PFC_WD'] + if config_scope not in pfcwd_config: + return None + + config = pfcwd_config[config_scope] + if attr in config: + return config[attr] + + return None + + +def get_pfcwd_poll_interval(host_ans): + """ + Get PFC watchdog polling interval + + Args: + host_ans: Ansible host instance of the device + + Returns: + Polling interval in ms (int) or None + """ + val = get_pfcwd_config_attr(host_ans=host_ans, + config_scope='GLOBAL', + attr='POLL_INTERVAL') + + if val is not None: + return int(val) + + return None + + +def get_pfcwd_detect_time(host_ans, intf): + """ + Get PFC watchdog detection time of a given interface + + Args: + host_ans: Ansible host instance of the device + intf (str): interface name + + Returns: + Detection time in ms (int) or None + """ + val = get_pfcwd_config_attr(host_ans=host_ans, + config_scope=intf, + attr='detection_time') + + if val is not None: + return int(val) + + return None + + 
+def get_pfcwd_restore_time(host_ans, intf): + """ + Get PFC watchdog restoration time of a given interface + + Args: + host_ans: Ansible host instance of the device + intf (str): interface name + + Returns: + Restoration time in ms (int) or None + """ + val = get_pfcwd_config_attr(host_ans=host_ans, + config_scope=intf, + attr='restoration_time') + + if val is not None: + return int(val) + + return None + + +def start_pfcwd(duthost): + """ + Start PFC watchdog with default setting + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + N/A + """ + duthost.shell('sudo pfcwd start_default') + + +def stop_pfcwd(duthost): + """ + Stop PFC watchdog + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + N/A + """ + duthost.shell('sudo pfcwd stop') + + +def disable_packet_aging(duthost): + """ + Disable packet aging feature (only on MLNX switches) + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + N/A + """ + if isMellanoxDevice(duthost): + duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp") + duthost.command("docker cp /tmp/packets_aging.py syncd:/") + duthost.command("docker exec syncd python /packets_aging.py disable") + duthost.command("docker exec syncd rm -rf /packets_aging.py") + + +def enable_packet_aging(duthost): + """ + Enable packet aging feature (only on MLNX switches) + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + N/A + """ + if isMellanoxDevice(duthost): + duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp") + duthost.command("docker cp /tmp/packets_aging.py syncd:/") + duthost.command("docker exec syncd python /packets_aging.py enable") + duthost.command("docker exec syncd rm -rf /packets_aging.py") diff --git a/tests/common/snappi/port.py b/tests/common/snappi/port.py new file mode 100644 index 00000000000..6beca66ce8b --- /dev/null +++ b/tests/common/snappi/port.py @@ -0,0 +1,95 @@ +import enum + + +class SnappiPortType(enum.Enum): + 
""" + Snappi port type + """ + IPInterface = 1 + PortChannelMember = 2 + VlanMember = 3 + + +class SnappiPortConfig: + """ + Snappi port configuration information + """ + def __init__(self, id, ip, mac, gw, gw_mac, prefix_len, port_type, peer_port): + self.id = id + self.ip = ip + self.mac = mac + self.gateway = gw + self.gateway_mac = gw_mac + self.prefix_len = prefix_len + self.type = port_type + self.peer_port = peer_port + + +def select_ports(port_config_list, pattern, rx_port_id): + """ + Given a traffic pattern, select Snappi ports to send and receive traffic + + Args: + port_config_list (list of SnappiPortConfig): + pattern (str): traffic pattern, "many to one" or "all to all" + rx_port_id (int): ID of the port that should receive traffic, e.g., + recever in "many to one" traffic pattern + + Returns: + tx_port_id_list (list): IDs of Snappi ports to send traffic + rx_port_id_list (list): IDs of Snappi ports to receive traffic + """ + tx_port_id_list = [] + rx_port_id_list = [] + + patterns = ['all to all', 'many to one'] + if pattern not in patterns: + raise ValueError('invalid traffic pattern passed in "{}", must be {}'.format( + pattern, ' or '.join(['"{}"'.format(src) for src in patterns]))) + + rx_port_config = next((x for x in port_config_list if x.id == rx_port_id), None) + if rx_port_config is None: + raise ValueError('Fail to find configuration for RX port') + + if pattern == "many to one": + rx_port_id_list = [rx_port_id] + """ Interfaces in the same portchannel cannot send traffic to each other """ + if rx_port_config.type == SnappiPortType.PortChannelMember: + tx_port_id_list = [x.id for x in port_config_list \ + if x.ip != rx_port_config.ip] + else: + tx_port_id_list = [x.id for x in port_config_list if x.id != rx_port_id] + + elif pattern == "all to all": + """ Interfaces in the same portchannel cannot send traffic to each other """ + if rx_port_config.type == SnappiPortType.PortChannelMember: + tx_port_id_list = [x.id for x in port_config_list \ 
+ if x.ip != rx_port_config.ip] + tx_port_id_list.append(rx_port_id) + else: + tx_port_id_list = [x.id for x in port_config_list] + + rx_port_id_list = [x for x in tx_port_id_list] + + return tx_port_id_list, rx_port_id_list + + +def select_tx_port(tx_port_id_list, rx_port_id): + """ + Select an Snappi port to send traffic + + Args: + tx_port_id_list (list): IDs of ports that can send traffic + rx_port_id (int): ID of the port that should receive traffic + + Returns: + ID of the port to send traffic (int) or None (if we fail to find it) + """ + if len(tx_port_id_list) == 0: + return None + + max_tx_port_id = max(tx_port_id_list) + if max_tx_port_id < rx_port_id: + return max_tx_port_id + else: + return min(x for x in tx_port_id_list if x > rx_port_id) diff --git a/tests/common/snappi/qos_fixtures.py b/tests/common/snappi/qos_fixtures.py new file mode 100644 index 00000000000..5d410ee875a --- /dev/null +++ b/tests/common/snappi/qos_fixtures.py @@ -0,0 +1,108 @@ +import pytest + +""" +RDMA test cases may require variety of fixtures. This +file currently holds the following fixture(s): + 1. prio_dscp_map + 2. all_prio_list + 3. lossless_prio_list + 4. lossy_prio_list +""" + + +@pytest.fixture(scope="module") +def prio_dscp_map(duthosts, rand_one_dut_hostname): + """ + This fixture reads the QOS parameters from SONiC DUT, and creates + priority Vs. DSCP priority port map + + Args: + duthosts (pytest fixture) : list of DUTs + rand_one_dut_hostname (pytest fixture): DUT hostname + + Returns: + Priority vs. DSCP map (dictionary, key = priority). 
+ Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....} + """ + duthost = duthosts[rand_one_dut_hostname] + config_facts = duthost.config_facts(host=duthost.hostname, + source="running")['ansible_facts'] + + if "DSCP_TO_TC_MAP" not in config_facts.keys(): + return None + + dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"] + if len(dscp_to_tc_map_lists) != 1: + return None + + profile = dscp_to_tc_map_lists.keys()[0] + dscp_to_tc_map = dscp_to_tc_map_lists[profile] + + result = {} + for dscp in dscp_to_tc_map: + tc = int(dscp_to_tc_map[dscp]) + result.setdefault(tc, []).append(int(dscp)) + + return result + + +@pytest.fixture(scope="module") +def all_prio_list(prio_dscp_map): + """ + This fixture returns the list of all the priorities + + Args: + prio_dscp_map (pytest fixture) : Priority vs. DSCP map + + Returns: + All the priorities (list) + """ + return list(prio_dscp_map.keys()) + + +@pytest.fixture(scope="module") +def lossless_prio_list(duthosts, rand_one_dut_hostname): + """ + This fixture returns the list of lossless priorities + + Args: + duthosts (pytest fixture) : list of DUTs + rand_one_dut_hostname (pytest fixture): DUT hostname + + Returns: + Lossless priorities (list) + """ + duthost = duthosts[rand_one_dut_hostname] + config_facts = duthost.config_facts(host=duthost.hostname, + source="running")['ansible_facts'] + + if "PORT_QOS_MAP" not in config_facts.keys(): + return None + + port_qos_map = config_facts["PORT_QOS_MAP"] + if len(port_qos_map.keys()) == 0: + return None + + """ Here we assume all the ports have the same lossless priorities """ + intf = port_qos_map.keys()[0] + if 'pfc_enable' not in port_qos_map[intf]: + return None + + result = [int(x) for x in port_qos_map[intf]['pfc_enable'].split(',')] + return result + + +@pytest.fixture(scope="module") +def lossy_prio_list(all_prio_list, lossless_prio_list): + """ + This fixture returns the list of lossu priorities + + Args: + all_prio_list (pytest fixture) : all the priorities + 
lossless_prio_list (pytest fixture): lossless priorities + + Returns: + Lossy priorities (list) + """ + result = [x for x in all_prio_list if x not in lossless_prio_list] + return result diff --git a/tests/common/snappi/snappi_fixtures.py b/tests/common/snappi/snappi_fixtures.py new file mode 100644 index 00000000000..69103d828f3 --- /dev/null +++ b/tests/common/snappi/snappi_fixtures.py @@ -0,0 +1,413 @@ +""" +This module contains the snappi fixture +""" +import pytest +import snappi +from ipaddress import ip_address, IPv4Address +from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\ + fanout_graph_facts +from tests.common.snappi.common_helpers import get_addrs_in_subnet,\ + get_peer_snappi_chassis +from tests.common.snappi.snappi_helpers import SnappiFanoutManager, get_snappi_port_location +from tests.common.snappi.port import SnappiPortConfig, SnappiPortType +from tests.common.helpers.assertions import pytest_assert + + +@pytest.fixture(scope="module") +def snappi_api_serv_ip(tbinfo): + """ + In a Snappi testbed, there is no PTF docker. + Hence, we use ptf_ip field to store snappi API server. + This fixture returns the IP address of the snappi API server. + Args: + tbinfo (pytest fixture): fixture provides information about testbed + Returns: + snappi API server IP + """ + return tbinfo['ptf_ip'] + + +@pytest.fixture(scope="module") +def snappi_api_serv_port(duthosts, rand_one_dut_hostname): + """ + This fixture returns the TCP Port of the Snappi API server. + Args: + duthost (pytest fixture): The duthost fixture. + Returns: + snappi API server port. + """ + duthost = duthosts[rand_one_dut_hostname] + return (duthost.host.options['variable_manager']. + _hostvars[duthost.hostname]['secret_group_vars'] + ['snappi_api_server']['rest_port']) + + +@pytest.fixture(scope='module') +def snappi_api(snappi_api_serv_ip, + snappi_api_serv_port): + """ + Fixture for session handle, + for creating snappi objects and making API calls. 
+ Args: + snappi_api_serv_ip (pytest fixture): snappi_api_serv_ip fixture + snappi_api_serv_port (pytest fixture): snappi_api_serv_port fixture. + """ + location = "https://" + snappi_api_serv_ip + ":" + str(snappi_api_serv_port) + # TODO: Currently extension is defaulted to ixnetwork. + # Going forward, we should be able to specify extension + # from command line while running pytest. + api = snappi.api(location=location, ext="ixnetwork") + + yield api + + if getattr(api, 'assistant', None) is not None: + api.assistant.Session.remove() + + +def __gen_mac(id): + """ + Generate a MAC address + Args: + id (int): Snappi port ID + Returns: + MAC address (string) + """ + return '00:11:22:33:44:{:02d}'.format(id) + + +def __valid_ipv4_addr(ip): + """ + Determine if a input string is a valid IPv4 address + Args: + ip (unicode str): input IP address + Returns: + True if the input is a valid IPv4 adress or False otherwise + """ + try: + return True if type(ip_address(ip)) is IPv4Address else False + except ValueError: + return False + + +def __l3_intf_config(config, port_config_list, duthost, snappi_ports): + """ + Generate Snappi configuration of layer 3 interfaces + Args: + config (obj): Snappi API config of the testbed + port_config_list (list): list of Snappi port configuration information + duthost (object): device under test + snappi_ports (list): list of Snappi port information + Returns: + True if we successfully generate configuration or False + """ + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + if 'minigraph_interfaces' in mg_facts: + l3_intf_facts = mg_facts['minigraph_interfaces'] + else: + return True + + if len(l3_intf_facts) == 0: + return True + + l3_intf = {} + for v in l3_intf_facts: + if __valid_ipv4_addr(v['addr']): + l3_intf[v['attachto']] = v + + dut_mac = str(duthost.facts['router_mac']) + + for k, v in l3_intf.items(): + intf = str(k) + gw_addr = str(v['addr']) + prefix = str(v['prefixlen']) + ip = str(v['peer_addr']) 
+ + port_ids = [id for id, snappi_port in enumerate(snappi_ports) \ + if snappi_port['peer_port'] == intf] + if len(port_ids) != 1: + return False + + port_id = port_ids[0] + mac = __gen_mac(port_id) + + device = config.devices.device( + name='Device Port {}'.format(port_id), + container_name=config.ports[port_id].name)[-1] + + ethernet = device.ethernet + ethernet.name = 'Ethernet Port {}'.format(port_id) + ethernet.mac = mac + + ip_stack = ethernet.ipv4 + ip_stack.name = 'Ipv4 Port {}'.format(port_id) + ip_stack.address = ip + ip_stack.prefix = int(prefix) + ip_stack.gateway = gw_addr + + port_config = SnappiPortConfig(id=port_id, + ip=ip, + mac=mac, + gw=gw_addr, + gw_mac=dut_mac, + prefix_len=prefix, + port_type=SnappiPortType.IPInterface, + peer_port=intf) + + port_config_list.append(port_config) + + return True + + +def __vlan_intf_config(config, port_config_list, duthost, snappi_ports): + """ + Generate Snappi configuration of Vlan interfaces + Args: + config (obj): Snappi API config of the testbed + port_config_list (list): list of Snappi port configuration information + duthost (object): device under test + snappi_ports (list): list of Snappi port information + Returns: + True if we successfully generate configuration or False + """ + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + if 'minigraph_vlans' in mg_facts: + vlan_facts = mg_facts['minigraph_vlans'] + else: + return True + + if len(vlan_facts) == 0: + return True + + vlan_member = {} + for k, v in vlan_facts.items(): + vlan_member[k] = v['members'] + + vlan_intf_facts = mg_facts['minigraph_vlan_interfaces'] + vlan_intf = {} + for v in vlan_intf_facts: + if __valid_ipv4_addr(v['addr']): + vlan_intf[v['attachto']] = v + + dut_mac = str(duthost.facts['router_mac']) + + """ For each Vlan """ + for vlan in vlan_member: + phy_intfs = vlan_member[vlan] + gw_addr = str(vlan_intf[vlan]['addr']) + prefix = str(vlan_intf[vlan]['prefixlen']) + vlan_subnet = '{}/{}'.format(gw_addr, 
prefix) + vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(phy_intfs)) + + """ For each physical interface attached to this Vlan """ + for i in range(len(phy_intfs)): + phy_intf = phy_intfs[i] + vlan_ip_addr = vlan_ip_addrs[i] + + port_ids = [id for id, snappi_port in enumerate(snappi_ports) \ + if snappi_port['peer_port'] == phy_intf] + if len(port_ids) != 1: + return False + + port_id = port_ids[0] + mac = __gen_mac(port_id) + device = config.devices.device( + name='Device Port {}'.format(port_id), + container_name=config.ports[port_id].name)[-1] + + ethernet = device.ethernet + ethernet.name = 'Ethernet Port {}'.format(port_id) + ethernet.mac = mac + + ip_stack = ethernet.ipv4 + ip_stack.name = 'Ipv4 Port {}'.format(port_id) + ip_stack.address = vlan_ip_addr + ip_stack.prefix = int(prefix) + ip_stack.gateway = gw_addr + + port_config = SnappiPortConfig(id=port_id, + ip=vlan_ip_addr, + mac=mac, + gw=gw_addr, + gw_mac=dut_mac, + prefix_len=prefix, + port_type=SnappiPortType.VlanMember, + peer_port=phy_intf) + + port_config_list.append(port_config) + + return True + + +def __portchannel_intf_config(config, port_config_list, duthost, snappi_ports): + """ + Generate Snappi configuration of portchannel interfaces + Args: + config (obj): Snappi API config of the testbed + port_config_list (list): list of Snappi port configuration information + duthost (object): device under test + snappi_ports (list): list of Snappi port information + Returns: + True if we successfully generate configuration or False + """ + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + if 'minigraph_portchannels' in mg_facts: + pc_facts = mg_facts['minigraph_portchannels'] + else: + return True + + if len(pc_facts) == 0: + return True + + pc_member = {} + for k, v in pc_facts.items(): + pc_member[k] = v['members'] + + pc_intf_facts = mg_facts['minigraph_portchannel_interfaces'] + pc_intf = {} + for v in pc_intf_facts: + if __valid_ipv4_addr(v['addr']): + 
pc_intf[v['attachto']] = v + + dut_mac = str(duthost.facts['router_mac']) + + """ For each port channel """ + for pc in pc_member: + phy_intfs = pc_member[pc] + gw_addr = str(pc_intf[pc]['addr']) + prefix = str(pc_intf[pc]['prefixlen']) + pc_ip_addr = str(pc_intf[pc]['peer_addr']) + + lag = config.lags.lag(name='Lag {}'.format(pc))[-1] + for i in range(len(phy_intfs)): + phy_intf = phy_intfs[i] + + port_ids = [id for id, snappi_port in enumerate(snappi_ports) \ + if snappi_port['peer_port'] == phy_intf] + if len(port_ids) != 1: + return False + + port_id = port_ids[0] + mac = __gen_mac(port_id) + + lp = lag.ports.port(port_name=config.ports[port_id].name)[-1] + lp.protocol.lacp.actor_system_id = '00:00:00:00:00:01' + lp.protocol.lacp.actor_system_priority = 1 + lp.protocol.lacp.actor_port_priority = 1 + lp.protocol.lacp.actor_port_number = 1 + lp.protocol.lacp.actor_key = 1 + + lp.ethernet.name = 'Ethernet Port {}'.format(port_id) + lp.ethernet.mac = mac + + port_config = SnappiPortConfig(id=port_id, + ip=pc_ip_addr, + mac=mac, + gw=gw_addr, + gw_mac=dut_mac, + prefix_len=prefix, + port_type=SnappiPortType.PortChannelMember, + peer_port=phy_intf) + + port_config_list.append(port_config) + + device = config.devices.device(name='Device {}'.format(pc), + container_name=lag.name)[-1] + + ip_stack = device.ethernet.ipv4 + ip_stack.address = pc_ip_addr + ip_stack.prefix = int(prefix) + ip_stack.gateway = gw_addr + + return True + + +@pytest.fixture(scope="function") +def snappi_testbed_config(conn_graph_facts, + fanout_graph_facts, + duthosts, + rand_one_dut_hostname, + snappi_api): + """ + Geenrate snappi API config and port config information for the testbed + Args: + conn_graph_facts (pytest fixture) + fanout_graph_facts (pytest fixture) + duthosts (pytest fixture): list of DUTs + rand_one_dut_hostname (pytest fixture): DUT hostname + snappi_api(pytest fixture): Snappi API fixture + Returns: + - config (obj): Snappi API config of the testbed + - port_config_list 
(list): list of port configuration information + """ + duthost = duthosts[rand_one_dut_hostname] + + """ Generate L1 config """ + snappi_fanout = get_peer_snappi_chassis(conn_data=conn_graph_facts, + dut_hostname=duthost.hostname) + + pytest_assert(snappi_fanout is not None, 'Fail to get snappi_fanout') + + snappi_fanout_id = list(fanout_graph_facts.keys()).index(snappi_fanout) + snappi_fanout_list = SnappiFanoutManager(fanout_graph_facts) + snappi_fanout_list.get_fanout_device_details(device_number=snappi_fanout_id) + + snappi_ports = snappi_fanout_list.get_ports(peer_device=duthost.hostname) + + port_speed = None + + """ L1 config """ + config = snappi_api.config() + for i in range(len(snappi_ports)): + config.ports.port(name='Port {}'.format(i), + location=get_snappi_port_location(snappi_ports[i])) + + if port_speed is None: + port_speed = int(snappi_ports[i]['speed']) + + pytest_assert(port_speed == int(snappi_ports[i]['speed']), + 'Ports have different link speeds') + + speed_gbps = int(port_speed/1000) + + config.options.port_options.location_preemption = True + l1_config = config.layer1.layer1()[-1] + l1_config.name = 'L1 config' + l1_config.port_names = [port.name for port in config.ports] + l1_config.speed = 'speed_{}_gbps'.format(speed_gbps) + l1_config.ieee_media_defaults = False + l1_config.auto_negotiate = False + l1_config.auto_negotiation.link_training = True + l1_config.auto_negotiation.rs_fec = True + + pfc = l1_config.flow_control.ieee_802_1qbb + pfc.pfc_delay = 0 + pfc.pfc_class_0 = 0 + pfc.pfc_class_1 = 1 + pfc.pfc_class_2 = 2 + pfc.pfc_class_3 = 3 + pfc.pfc_class_4 = 4 + pfc.pfc_class_5 = 5 + pfc.pfc_class_6 = 6 + pfc.pfc_class_7 = 7 + + port_config_list = [] + + config_result = __vlan_intf_config(config=config, + port_config_list=port_config_list, + duthost=duthost, + snappi_ports=snappi_ports) + pytest_assert(config_result is True, 'Fail to configure Vlan interfaces') + + config_result = __portchannel_intf_config(config=config, + 
port_config_list=port_config_list, + duthost=duthost, + snappi_ports=snappi_ports) + pytest_assert(config_result is True, 'Fail to configure portchannel interfaces') + + config_result = __l3_intf_config(config=config, + port_config_list=port_config_list, + duthost=duthost, + snappi_ports=snappi_ports) + pytest_assert(config_result is True, 'Fail to configure L3 interfaces') + + return config, port_config_list \ No newline at end of file diff --git a/tests/common/snappi/snappi_helpers.py b/tests/common/snappi/snappi_helpers.py new file mode 100644 index 00000000000..0967c968b2e --- /dev/null +++ b/tests/common/snappi/snappi_helpers.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +""" +This module contains a definition of a simple helper class +"SnappiFanoutManager" which can be used to manage cards and ports of Snappi +chassis instead of reading it from fanout_graph_facts fixture. +""" + +from tests.common.helpers.assertions import pytest_assert +from tests.common.snappi.common_helpers import ansible_stdout_to_str +from tests.common.reboot import logger + + +class SnappiFanoutManager(): + """Class for managing multiple chassis and extracting the information + like chassis IP, card, port etc. from fanout_graph_fact.""" + + def __init__(self, fanout_data): + """ When multiple chassis are available inside fanout_graph_facts + this method makes a list of chassis connection-details out of it. + So each chassis and details associated with it can be accessed by + a integer index (starting from 0) + + Args: + fanout_data (dict): the dictionary returned by fanout_graph_fact. 
+ Example format of the fanout_data is given below + + {u'snappi-sonic': { + u'device_conn': { + u'Card9/Port1': { + u'peerdevice': u'sonic-s6100-dut', + u'peerport': u'Ethernet0', + u'speed': u'100000' + }, + + u'Card9/Port2': { + u'peerdevice': u'sonic-s6100-dut', + u'peerport': u'Ethernet4', + u'speed': u'100000' + }, + + u'Card9/Port3': { + u'peerdevice': u'sonic-s6100-dut', + u'peerport': u'Ethernet8', + u'speed': u'100000' + } + }, + u'device_info': { + u'HwSku': u'SNAPPI-tester', + u'ManagementGw': u'10.36.78.54', + u'ManagementIp': u'10.36.78.53/32', + u'Type': u'DevSnappiChassis', + u'mgmtip': u'10.36.78.53' + }, + u'device_port_vlans': { + u'Card9/Port1': { + u'mode': u'Access', + u'vlanids': u'300', + u'vlanlist': [300] + }, + u'Card9/Port2': { + u'mode': u'Access', + u'vlanids': u'301', + u'vlanlist': [301] + }, + u'Card9/Port3': { + u'mode': u'Access', + u'vlanids': u'302', + u'vlanlist': [302] + } + }, + u'device_vlan_list': [301, 302, 300, 302, 300, 301], + u'device_vlan_range': [u'300-302'] + } + } + """ + self.last_fanout_assessed = None + self.fanout_list = [] + self.last_device_connection_details = None + self.current_snappi_port_list = None + self.ip_address = '0.0.0.0' + + for fanout in fanout_data.keys(): + self.fanout_list.append(fanout_data[fanout]) + + def __parse_fanout_connections__(self): + device_conn = self.last_device_connection_details + retval = [] + for key in device_conn.keys(): + fanout_port = ansible_stdout_to_str(key) + peer_port = ansible_stdout_to_str(device_conn[key]['peerport']) + peer_device = ansible_stdout_to_str(device_conn[key]['peerdevice']) + speed = ansible_stdout_to_str(device_conn[key]['speed']) + string = "{}/{}/{}/{}/{}".\ + format(self.ip_address, fanout_port, peer_port, peer_device, speed) + retval.append(string) + + return(retval) + + def get_fanout_device_details(self, device_number): + """With the help of this function you can select the chassis you want + to access. 
For example get_fanout_device_details(0) selects the + first chassis. It just select the chassis but does not return + anything. The rest of the function then used to extract chassis + information like "get_chassis_ip()" will the return the ip address + of chassis 0 - the first chassis in the list. + + Note: + Counting or indexing starts from 0. That is 0 = 1st chassis, + 1 = 2nd chassis ... + + Args: + device_number (int): the chassis index (0 is the first) + + Returns: + None + """ + + # Pointer to chassis info + self.last_fanout_assessed = device_number + + # Chassis connection details + self.last_device_connection_details = \ + self.fanout_list[self.last_fanout_assessed]['device_conn'] + + # Chassis ip details + chassis_ip = self.fanout_list[self.last_fanout_assessed]['device_info']['mgmtip'] + self.ip_address = ansible_stdout_to_str(chassis_ip) + + # List of chassis cards and ports + self.current_snappi_port_list = \ + self.__parse_fanout_connections__() + + def get_connection_details(self): + """This function returns all the details associated with a particular + chassis (selected earlier using get_fanout_device_details() function). + Details of the chassis will be available like chassis IP, card, ports, + peer port etc. in a dictionary format. + + Note: If you have not used get_fanout_device_details(), by default 0th + (first) chassis remains selected. + + Args: + This function takes no argument. + + Returns: + Details of the chassis connection as dictionary format. + """ + return(self.last_device_connection_details) + + def get_chassis_ip(self): + """This function returns IP address of a particular chassis + (selected earlier using get_fanout_device_details() function). + + Note: If you have not used get_fanout_device_details(), by default 0th + (first) chassis remains selected. + + Args: + This function takes no argument. 
+ + Returns: + The IP address + """ + return self.ip_address + + def get_ports(self, peer_device=None): + """This function returns list of ports that are (1) associated with a + chassis (selected earlier using get_fanout_device_details() function) + and (2) connected to a peer device (SONiC DUT) as a list of dictionary. + + Note: If you have not used get_fanout_device_details(), by default 0th + (first) chassis remains selected. If you do not specify peer_device, + this function will return all the ports of the chassis. + + Args: + peer_device (str): hostname of the peer device + + Returns: + Dictionary of chassis card port information. + """ + retval = [] + for port in self.current_snappi_port_list: + info_list = port.split('/') + dict_element = { + 'ip': info_list[0], + 'card_id': info_list[1].replace('Card', ''), + 'port_id': info_list[2].replace('Port', ''), + 'peer_port': info_list[3], + 'peer_device': info_list[4], + 'speed': info_list[5] + } + + if peer_device is None or info_list[4] == peer_device: + retval.append(dict_element) + + return retval + + +def get_snappi_port_location(intf): + """ + Extracting location from interface, since Snappi Api accepts location + in terms of chassis ip, card, and port in different format. + + Note: Interface must have the keys 'ip', 'card_id' and 'port_id' + + Args: + intf (dict) : intf must contain the keys 'ip', 'card_id', 'port_id'. + Example format : + {'ip': u'10.36.78.53', + 'port_id': u'1', + 'card_id': u'9', + 'speed': 100000, + 'peer_port': u'Ethernet0'} + + Returns: location in string format. Example: '10.36.78.5;1;2' where + 1 is card_id and 2 is port_id. 
+ """ + keys = set(['ip', 'card_id', 'port_id']) + pytest_assert(keys.issubset(set(intf.keys())), "intf does not have all the keys") + + return "{};{};{}".format(intf['ip'], intf['card_id'], intf['port_id']) diff --git a/tests/snappi/test_snappi.py b/tests/snappi/test_snappi.py new file mode 100644 index 00000000000..80092aa0712 --- /dev/null +++ b/tests/snappi/test_snappi.py @@ -0,0 +1,186 @@ +import time +import pytest + +from tests.common.helpers.assertions import pytest_assert, pytest_require +from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\ + fanout_graph_facts +from tests.common.snappi.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port,\ + snappi_api, snappi_testbed_config +from tests.common.snappi.port import select_ports +from tests.common.snappi.qos_fixtures import prio_dscp_map + + +@pytest.mark.topology("snappi") +@pytest.mark.disable_loganalyzer +def __gen_all_to_all_traffic(testbed_config, + port_config_list, + dut_hostname, + conn_data, + fanout_data, + priority, + prio_dscp_map): + + + rate_percent = 100 / (len(port_config_list) - 1) + duration_sec = 2 + pkt_size = 1024 + + tx_port_id_list, rx_port_id_list = select_ports(port_config_list=port_config_list, + pattern="all to all", + rx_port_id=0) + + for tx_port_id in tx_port_id_list: + for rx_port_id in rx_port_id_list: + if tx_port_id == rx_port_id: + continue + + tx_port_config = next((x for x in port_config_list if x.id == tx_port_id), None) + rx_port_config = next((x for x in port_config_list if x.id == rx_port_id), None) + + tx_mac = tx_port_config.mac + if tx_port_config.gateway == rx_port_config.gateway and \ + tx_port_config.prefix_len == rx_port_config.prefix_len: + """ If soruce and destination port are in the same subnet """ + rx_mac = rx_port_config.mac + else: + rx_mac = tx_port_config.gateway_mac + + tx_port_name = testbed_config.ports[tx_port_id].name + rx_port_name = testbed_config.ports[rx_port_id].name + + flow = testbed_config.flows.flow( + 
name="Flow {} -> {}".format(tx_port_id, rx_port_id))[-1] + + flow.tx_rx.port.tx_name = tx_port_name + flow.tx_rx.port.rx_name = rx_port_name + + eth, ipv4 = flow.packet.ethernet().ipv4() + eth.src.value = tx_mac + eth.dst.value = rx_mac + eth.pfc_queue.value = priority + + ipv4.src.value = tx_port_config.ip + ipv4.dst.value = rx_port_config.ip + ipv4.priority.choice = ipv4.priority.DSCP + ipv4.priority.dscp.phb.values = prio_dscp_map[priority] + ipv4.priority.dscp.ecn.value = ( + ipv4.priority.dscp.ecn.CAPABLE_TRANSPORT_1 + ) + + flow.size.fixed = pkt_size + flow.rate.percentage = rate_percent + flow.duration.fixed_seconds.seconds = duration_sec + + flow.metrics.enable = True + flow.metrics.loss = True + + return testbed_config + + +def test_snappi(snappi_api, + snappi_testbed_config, + conn_graph_facts, + fanout_graph_facts, + rand_one_dut_lossless_prio, + prio_dscp_map): + """ + Test if we can use Snappi API generate traffic in a testbed + + Args: + snappi_api (pytest fixture): Snappi session + snappi_testbed_config (pytest fixture): testbed configuration information + conn_graph_facts (pytest fixture): connection graph + fanout_graph_facts (pytest fixture): fanout graph + rand_one_dut_lossless_prio (str): name of lossless priority to test + prio_dscp_map (pytest fixture): priority vs. 
DSCP map (key = priority) + + Returns: + N/A + """ + testbed_config, port_config_list = snappi_testbed_config + dut_hostname, lossless_prio = rand_one_dut_lossless_prio.split('|') + + pytest_require(len(port_config_list) >= 2, "This test requires at least 2 ports") + + config = __gen_all_to_all_traffic(testbed_config=testbed_config, + port_config_list=port_config_list, + dut_hostname=dut_hostname, + conn_data=conn_graph_facts, + fanout_data=fanout_graph_facts, + priority=int(lossless_prio), + prio_dscp_map=prio_dscp_map) + + pkt_size = config.flows[0].size.fixed + rate_percent = config.flows[0].rate.percentage + duration_sec = config.flows[0].duration.fixed_seconds.seconds + + port_speed = config.layer1[0].speed + words = port_speed.split('_') + pytest_assert(len(words) == 3 and words[1].isdigit(), + 'Fail to get port speed from {}'.format(port_speed)) + + port_speed_gbps = int(words[1]) + + # """ Apply configuration """ + snappi_api.set_config(config) + + # """ Start traffic """ + ts = snappi_api.transmit_state() + ts.state = ts.START + snappi_api.set_transmit_state(ts) + + # """ Wait for traffic to finish """ + time.sleep(duration_sec) + + attempts = 0 + max_attempts = 20 + all_flow_names = [flow.name for flow in config.flows] + + while attempts < max_attempts: + request = snappi_api.metrics_request() + request.flow.flow_names = all_flow_names + rows = snappi_api.get_metrics(request).flow_metrics + + """ If all the data flows have stopped """ + transmit_states = [row.transmit for row in rows] + if len(rows) == len(all_flow_names) and\ + list(set(transmit_states)) == ['stopped']: + time.sleep(2) + break + else: + time.sleep(1) + attempts += 1 + + pytest_assert(attempts < max_attempts, + "Flows do not stop in {} seconds".format(max_attempts)) + + """ Dump per-flow statistics """ + request = snappi_api.metrics_request() + request.flow.flow_names = all_flow_names + rows = snappi_api.get_metrics(request).flow_metrics + + ts = snappi_api.transmit_state() + ts.state = 
ts.STOP + snappi_api.set_transmit_state(ts) + + """ Analyze traffic results """ + for row in rows: + flow_name = row.name + rx_frames = row.frames_rx + tx_frames = row.frames_tx + + pytest_assert(rx_frames == tx_frames, + 'packet losses for {} (Tx: {}, Rx: {})'.\ + format(flow_name, tx_frames, rx_frames)) + + tput_bps = port_speed_gbps * 1e9 * rate_percent / 100.0 + exp_rx_frames = tput_bps * duration_sec / 8 / pkt_size + + deviation_thresh = 0.05 + ratio = float(exp_rx_frames) / rx_frames + deviation = abs(ratio - 1) + + pytest_assert(deviation <= deviation_thresh, + 'Expected / Actual # of pkts for flow {}: {} / {}'.\ + format(flow_name, exp_rx_frames, rx_frames)) + From d73e9332d48ae9762f1cfa7886b4bafcb0f12e9e Mon Sep 17 00:00:00 2001 From: skytreat Date: Thu, 22 Jul 2021 20:31:50 +0800 Subject: [PATCH 003/117] Added instructions to copy sonic-vs.img under ~/veos-vm/images Added instructions to copy sonic-vs.img under ~/veos-vm/images inside "Download the sonic-vs image" section --- docs/testbed/README.testbed.VsSetup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index b64449d37fa..4560e412168 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -78,7 +78,7 @@ To run the tests with a virtual SONiC device, we need a virtual SONiC image. The wget "https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/sonic-vs.img.gz" -O sonic-vs.img.gz ``` -2. Unzip the image and move it into `~/sonic-vm/images/` +2. 
Unzip the image and move it into `~/sonic-vm/images/` and also `~/veos-vm/images` ``` gzip -d sonic-vs.img.gz From ed0d9269fbcfed3cc7545ea5b7d62bf5b5420676 Mon Sep 17 00:00:00 2001 From: skytreat Date: Thu, 22 Jul 2021 20:33:52 +0800 Subject: [PATCH 004/117] Added script to copy sonic-vs.img to ~/veos-vm/images Added script to copy sonic-vs.img to ~/veos-vm/images --- docs/testbed/README.testbed.VsSetup.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index 4560e412168..a9adf6fe180 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -78,12 +78,13 @@ To run the tests with a virtual SONiC device, we need a virtual SONiC image. The wget "https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/sonic-vs.img.gz" -O sonic-vs.img.gz ``` -2. Unzip the image and move it into `~/sonic-vm/images/` and also `~/veos-vm/images` +2. Unzip the image and copy it into `~/sonic-vm/images/` and also `~/veos-vm/images` ``` gzip -d sonic-vs.img.gz mkdir -p ~/sonic-vm/images -mv sonic-vs.img ~/sonic-vm/images +cp sonic-vs.img ~/sonic-vm/images +mv sonic-vs.img ~/veos-vm/images ``` ## Setup sonic-mgmt docker From 138716ecf4d366327d6dca55fe47e1f7ace745b3 Mon Sep 17 00:00:00 2001 From: AndoniSanguesa <31708881+AndoniSanguesa@users.noreply.github.com> Date: Thu, 22 Jul 2021 07:51:20 -0700 Subject: [PATCH 005/117] Fix PTF Container Down after HTTP Copy Test (#3829) What is the motivation for this PR? Previous version of the http copy test was crashing the PTF container. This modification fixes that issue. How did you do it? The error originally occurred in closing the HTTP server remotely. Instead of trying to get the pid of the http server process by communicating with the PTF, a script is run on the PTF that identifies the process and shuts it down. How did you verify/test it? 
After the test I ran a second test that would check for communication with the PTF container. Both tests passed while not shutting down the container. Co-authored-by: Andoni Sanguesa --- tests/http/stop_http_server.py | 12 ++++++++++++ tests/http/test_http_copy.py | 30 +++++++++++++----------------- 2 files changed, 25 insertions(+), 17 deletions(-) create mode 100644 tests/http/stop_http_server.py diff --git a/tests/http/stop_http_server.py b/tests/http/stop_http_server.py new file mode 100644 index 00000000000..77b9424e612 --- /dev/null +++ b/tests/http/stop_http_server.py @@ -0,0 +1,12 @@ +import subprocess +import os + +# Get list of python processes +output = subprocess.check_output(["ps", "-ef"]) +output = output.split("\n") + +# Find process that is running the http server and kill it +for line in output: + if "tmp/start_http_server.py" in line: + pid = line.split()[1] + os.system("kill {}".format(pid)) \ No newline at end of file diff --git a/tests/http/test_http_copy.py b/tests/http/test_http_copy.py index 8b6db6c3c52..e5519775352 100644 --- a/tests/http/test_http_copy.py +++ b/tests/http/test_http_copy.py @@ -15,17 +15,16 @@ def test_http_copy(duthosts, rand_one_dut_hostname, ptfhost): """Test that HTTP (copy) can be used to download objects to the DUT""" - - pytest.skip("---- test causes ptf docker to crash after the test is done, skipping until the issue is addressed") - + duthost = duthosts[rand_one_dut_hostname] ptf_ip = ptfhost.mgmt_ip test_file_name = "test_file.bin" - # Copies http server file to ptf module_async=True + # Copies http server files to ptf ptfhost.copy(src="http/start_http_server.py", dest="/tmp/start_http_server.py") - + ptfhost.copy(src="http/stop_http_server.py", dest="/tmp/stop_http_server.py") + # Starts the http server on the ptf ptfhost.command("python /tmp/start_http_server.py", module_async=True) @@ -44,9 +43,9 @@ def test_http_copy(duthosts, rand_one_dut_hostname, ptfhost): ptfhost.command(("dd if=/dev/urandom of=./{} 
count=1 bs=1000000000 iflag=fullblock".format(test_file_name))) # Ensure that file was downloaded - res = ptfhost.command("ls -ltr ./{}".format(test_file_name), module_ignore_errors=True)["rc"] + file_stat = ptfhost.stat(path="./{}".format(test_file_name)) - pytest_assert(res==0, "Test file was not found on DUT after attempted http get") + pytest_assert(file_stat["stat"]["exists"], "Test file was not found on DUT after attempted http get") # Generate MD5 checksum to compare with the sent file output = ptfhost.command("md5sum ./{}".format(test_file_name))["stdout"] @@ -56,9 +55,9 @@ def test_http_copy(duthosts, rand_one_dut_hostname, ptfhost): duthost.command("curl -O {}:{}/{}".format(ptf_ip, HTTP_PORT, test_file_name)) # Validate file was received - res = duthost.command("ls -ltr ./{}".format(test_file_name), module_ignore_errors=True)["rc"] + file_stat = duthost.stat(path="./{}".format(test_file_name)) - pytest_assert(res==0, "Test file was not found on DUT after attempted http get") + pytest_assert(file_stat["stat"]["exists"], "Test file was not found on DUT after attempted http get") # Get MD5 checksum of received file output = duthost.command("md5sum ./{}".format(test_file_name))["stdout"] @@ -71,23 +70,20 @@ def test_http_copy(duthosts, rand_one_dut_hostname, ptfhost): duthost.command("sudo rm ./{}".format(test_file_name)) # Confirm cleanup occured succesfuly - res = duthost.command("ls -ltr ./{}".format(test_file_name), module_ignore_errors=True)["rc"] + file_stat = duthost.stat(path="./{}".format(test_file_name)) - pytest_assert(res!=0, "DUT container could not be cleaned.") + pytest_assert(not file_stat["stat"]["exists"], "DUT container could not be cleaned.") # Delete file off ptf ptfhost.command(("rm ./{}".format(test_file_name))) # Ensure that file was removed correctly - res = ptfhost.command("ls -ltr ./{}".format(test_file_name), module_ignore_errors=True)["rc"] + file_stat = ptfhost.stat(path="./{}".format(test_file_name)) - pytest_assert(res!=0, "PTF 
container could not be cleaned.") + pytest_assert(not file_stat["stat"]["exists"], "PTF container could not be cleaned.") # Stops http server - output = ptfhost.command("netstat -nlp | grep :8080")["stdout"] - pid_and_program = output.split()[-2] - pid = pid_and_program.split("/")[0] - ptfhost.command("kill {}".format(pid)) + ptfhost.command("python /tmp/stop_http_server.py") # Ensure that HTTP server was closed started = True From 067014c2e7502b0854c3fb04769f50c417136083 Mon Sep 17 00:00:00 2001 From: roman_savchuk Date: Thu, 22 Jul 2021 19:25:32 +0300 Subject: [PATCH 006/117] Removed hardcoded proxy from memory checker test (#3779) What is the motivation for this PR? TC can not install stress tool due to hardcoded proxy settings How did you do it? Added ability to read proxy settings from creds How did you verify/test it? Run TC, stress tool have been successfully installed and deleted after test run Signed-off-by: Roman Savchuk --- tests/memory_checker/test_memory_checker.py | 23 +++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/tests/memory_checker/test_memory_checker.py b/tests/memory_checker/test_memory_checker.py index 1452e693b0c..3f542af9e5c 100644 --- a/tests/memory_checker/test_memory_checker.py +++ b/tests/memory_checker/test_memory_checker.py @@ -59,8 +59,11 @@ def modify_monit_config_and_restart(duthost): logger.info("Restart Monit service ...") duthost.shell("sudo systemctl restart monit") + logger.info("Restore bgp neighbours ...") + duthost.shell("config bgp startup all") -def install_stress_utility(duthost, container_name): + +def install_stress_utility(duthost, creds, container_name): """Installs the 'stress' utility in container. 
Args: @@ -72,9 +75,15 @@ def install_stress_utility(duthost, container_name): """ logger.info("Installing 'stress' utility in '{}' container ...".format(container_name)) - install_cmd_result = duthost.shell("docker exec {} bash -c 'export http_proxy=http://100.127.20.21:8080 \ - && export https_proxy=http://100.127.20.21:8080 \ - && apt-get install stress -y'".format(container_name)) + # Get proxy settings from creds + http_proxy = creds.get('proxy_env', {}).get('http_proxy', '') + https_proxy = creds.get('proxy_env', {}).get('https_proxy', '') + + # Shutdown bgp for having ability to install stress tool + duthost.shell("config bgp shutdown all") + install_cmd_result = duthost.shell("docker exec {} bash -c 'export http_proxy={} \ + && export https_proxy={} \ + && apt-get install stress -y'".format(container_name, http_proxy, https_proxy)) exit_code = install_cmd_result["rc"] pytest_assert(exit_code == 0, "Failed to install 'stress' utility!") @@ -184,7 +193,7 @@ def postcheck_critical_processes(duthost, container_name): logger.info("All critical processes in '{}' container are running.".format(container_name)) -def test_memory_checker(duthosts, enum_rand_one_per_hwsku_frontend_hostname): +def test_memory_checker(duthosts, creds, enum_rand_one_per_hwsku_frontend_hostname): """Checks whether the telemetry container can be restarted or not if the memory usage of it is beyond the threshold. The `stress` utility is leveraged as the memory stressing tool. 
@@ -208,6 +217,7 @@ def test_memory_checker(duthosts, enum_rand_one_per_hwsku_frontend_hostname): or parse_version(duthost.kernel_version) > parse_version("4.9.0"), "Test is not supported for 20191130.72 and older image versions!") + expected_alerting_messages = [] loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="container_restart_due_to_memory") loganalyzer.expect_regex = [] @@ -218,7 +228,8 @@ def test_memory_checker(duthosts, enum_rand_one_per_hwsku_frontend_hostname): loganalyzer.expect_regex.extend(expected_alerting_messages) marker = loganalyzer.init() - install_stress_utility(duthost, container_name) + install_stress_utility(duthost, creds, container_name) consume_memory_and_restart_container(duthost, container_name, vm_workers, loganalyzer, marker) + remove_stress_utility(duthost, container_name) postcheck_critical_processes(duthost, container_name) From 3a5a650b49d59d3962ceac6cbd3c9ae30a7d047d Mon Sep 17 00:00:00 2001 From: Vladyslav Morokhovych Date: Fri, 23 Jul 2021 01:59:18 +0300 Subject: [PATCH 007/117] [NAT]: Fixes for NAT TC (#3744) Changes in this PR: - Fixed dut_nat_iptables_status parser, now use only valid data for postrouting entry. - Change in get_redis_val to remove 'expireat' and 'ttl' fields. - Fixed test_nat_static_redis_asic - now source and destination output will be get dynamically. - Add minor debug support in asserts for unexpected iptables output in NAT table and fix typo. How did you verify/test it? 
Run specific TC to test fixes Signed-off-by: Vladyslav Morokhovych --- tests/nat/nat_helpers.py | 8 +++++++- tests/nat/test_dynamic_nat.py | 38 +++++++++++++++++------------------ tests/nat/test_static_nat.py | 17 ++++++++-------- 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/tests/nat/nat_helpers.py b/tests/nat/nat_helpers.py index f734325c72b..f909a26a699 100644 --- a/tests/nat/nat_helpers.py +++ b/tests/nat/nat_helpers.py @@ -143,11 +143,12 @@ def dut_nat_iptables_status(duthost): index_prerouting = [i for i in range(0, len(entries)) if "PREROUTING" in entries[i]][0] + 2 index_input = [i for i in range(0, len(entries)) if "INPUT" in entries[i]][0] index_postrouting = [i for i in range(0, len(entries)) if 'POSTROUTING' in entries[i]][0] + 2 + index_output = [i for i in range(0, len(entries)) if "OUTPUT" in entries[i]][0] if any(['DOCKER' in entry for entry in entries]): index_docker = [i for i in range(0, len(entries)) if 'DOCKER' in entries[i]][0] postrouting = [el for el in entries[index_postrouting:index_docker] if len(el) > 1] else: - postrouting = [el for el in entries[index_postrouting:] if len(el) > 1] + postrouting = [el for el in entries[index_postrouting:index_output] if len(el) > 1] prerouting = [el for el in entries[index_prerouting:index_input] if len(el) > 0] nat_table_status["prerouting"] = [" ".join([s.strip() for s in el.split() if len(el) > 0]) for el in prerouting] @@ -1108,6 +1109,11 @@ def get_redis_val(duthost, db, key): if output["rc"]: raise Exception('Return code is {} not 0'.format(output_cli["rc"])) redis_dict = json.loads(output['stdout']) + for table in redis_dict: + if 'expireat' in redis_dict[table]: + redis_dict[table].pop('expireat') + if 'ttl' in redis_dict[table]: + redis_dict[table].pop('ttl') return redis_dict except Exception as e: return e.__str__() diff --git a/tests/nat/test_dynamic_nat.py b/tests/nat/test_dynamic_nat.py index 05897d5b742..fd5f8cb245e 100644 --- a/tests/nat/test_dynamic_nat.py +++ 
b/tests/nat/test_dynamic_nat.py @@ -635,7 +635,7 @@ def test_nat_interfaces_flap_dynamic(self, ptfhost, tbinfo, duthost, ptfadapter, portrange = "{}-{}".format(POOL_RANGE_START_PORT, POOL_RANGE_END_PORT) acl_subnet = setup_data[interface_type]["acl_subnet"] public_ip = setup_data[interface_type]["public_ip"] - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [ "SNAT tcp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, @@ -645,8 +645,8 @@ def test_nat_interfaces_flap_dynamic(self, ptfhost, tbinfo, duthost, ptfadapter, "SNAT icmp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, portrange)] } - pytest_assert(iptables_rules == iptables_ouput, - "Unexpected iptables output for nat table") + pytest_assert(iptables_rules == iptables_output, + "Unexpected iptables output for nat table. 
\n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Enable outer interface dut_interface_control(duthost, "enable", setup_data["config_portchannels"][ifname_to_disable]['members'][0]) # Send traffic @@ -801,7 +801,7 @@ def test_nat_dynamic_binding_remove(self, ptfhost, tbinfo, duthost, ptfadapter, portrange)] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) # Delete NAT bindings exec_command(duthost, ["config nat remove bindings"]) # Confirm that binding has been removed @@ -822,7 +822,7 @@ def test_nat_dynamic_binding_remove(self, ptfhost, tbinfo, duthost, ptfadapter, "postrouting": [] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) @pytest.mark.nat_dynamic def test_nat_dynamic_iptable_snat(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, @@ -893,7 +893,7 @@ def test_nat_dynamic_outside_interface_delete(self, ptfhost, tbinfo, duthost, pt "SNAT icmp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, portrange)] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) # Remove outside interface IP interface_ip = "{} {}/{}".format(setup_data[interface_type]["vrf_conf"]["red"]["dut_iface"], setup_data[interface_type]["vrf_conf"]["red"]["gw"], @@ -906,7 +906,7 @@ def test_nat_dynamic_outside_interface_delete(self, ptfhost, tbinfo, duthost, pt "postrouting": [] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n 
Expected:\n{}".format(iptables_output, iptables_rules)) # Restore previous configuration dut_interface_control(duthost, "ip add", setup_data["config_portchannels"][ifname_to_disable]['members'][0], interface_ip) # Send TCP/UDP traffic and confirm that restoring previous configuration went well @@ -936,11 +936,11 @@ def test_nat_dynamic_nat_pools(self, tbinfo, duthost, ptfhost, ptfadapter, setup src_port, dst_port = get_l4_default_ports(protocol_type) # Check, if iptables is empty - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. \n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Prepare and add configuration json file @@ -952,7 +952,7 @@ def test_nat_dynamic_nat_pools(self, tbinfo, duthost, ptfhost, ptfadapter, setup } write_json(duthost, nat_session, 'dynamic_binding') # Check iptables - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [ "SNAT tcp -- {} 0.0.0.0/0 mark match 0x1 to:{}:{} fullcone".format(acl_subnet, public_ip, @@ -962,7 +962,7 @@ def test_nat_dynamic_nat_pools(self, tbinfo, duthost, ptfhost, ptfadapter, setup "SNAT icmp -- {} 0.0.0.0/0 mark match 0x1 to:{}:{} fullcone".format(acl_subnet, public_ip, port_range)] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. \n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Check traffic. 
Zone 1 is not configured, not NAT translations expected generate_and_verify_not_translated_traffic(ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type) @@ -970,7 +970,7 @@ def test_nat_dynamic_nat_pools(self, tbinfo, duthost, ptfhost, ptfadapter, setup # Setup zones nat_zones_config(duthost, setup_data, interface_type) # Check iptables - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [ "SNAT tcp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, @@ -980,7 +980,7 @@ def test_nat_dynamic_nat_pools(self, tbinfo, duthost, ptfhost, ptfadapter, setup "SNAT icmp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, port_range)] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. 
\n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Perform TCP handshake (host-tor -> leaf-tor) @@ -1019,7 +1019,7 @@ def test_nat_dynamic_modify_bindings(self, ptfhost, tbinfo, duthost, ptfadapter, portrange = "{}-{}".format(POOL_RANGE_START_PORT, POOL_RANGE_END_PORT) acl_subnet = setup_data[interface_type]["acl_subnet"] public_ip = setup_data[interface_type]["public_ip"] - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [ "SNAT tcp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, @@ -1029,7 +1029,7 @@ def test_nat_dynamic_modify_bindings(self, ptfhost, tbinfo, duthost, ptfadapter, "SNAT icmp -- {} 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(acl_subnet, public_ip, portrange)] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. \n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Send TCP/UDP traffic and check generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, direction, protocol_type, nat_type=nat_type) @@ -1040,11 +1040,11 @@ def test_nat_dynamic_modify_bindings(self, ptfhost, tbinfo, duthost, ptfadapter, # Check, if nat bindings is empty pytest_assert(len(get_cli_show_nat_config_output(duthost, "bindings")) == 0, "Nat bindings is not empty") # Check, if iptables is empty - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. 
\n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) wait_timeout(protocol_type) # Send TCP/UDP traffic and check without NAT @@ -1056,7 +1056,7 @@ def test_nat_dynamic_modify_bindings(self, ptfhost, tbinfo, duthost, ptfadapter, nat_binding[0]["pool name"], acl_subnet)) public_ip = setup_data[interface_type]["public_ip"] portrange = "{}-{}".format(POOL_RANGE_START_PORT, POOL_RANGE_END_PORT) - iptables_ouput = dut_nat_iptables_status(duthost) + iptables_output = dut_nat_iptables_status(duthost) iptables_rules = {"prerouting": ['DNAT all -- 0.0.0.0/0 0.0.0.0/0 to:1.1.1.1 fullcone'], "postrouting": [ "SNAT tcp -- 0.0.0.0/0 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(public_ip, @@ -1066,7 +1066,7 @@ def test_nat_dynamic_modify_bindings(self, ptfhost, tbinfo, duthost, ptfadapter, "SNAT icmp -- 0.0.0.0/0 0.0.0.0/0 mark match 0x2 to:{}:{} fullcone".format(public_ip, portrange)] } - pytest_assert(iptables_rules == iptables_ouput, + pytest_assert(iptables_rules == iptables_output, "Unexpected iptables output for nat table. 
\n Got:\n{}\n Expected:\n{}".format(iptables_ouput, iptables_rules)) # Perform TCP handshake (host-tor -> leaf-tor) diff --git a/tests/nat/test_static_nat.py b/tests/nat/test_static_nat.py index 6362367b0ba..49c344f4f69 100644 --- a/tests/nat/test_static_nat.py +++ b/tests/nat/test_static_nat.py @@ -630,7 +630,7 @@ def test_nat_static_iptables_add_remove(self, ptfhost, tbinfo, duthost, ptfadapt "postrouting": [] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) # Set NAT configuration for test network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type) apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type, @@ -650,7 +650,7 @@ def test_nat_static_iptables_add_remove(self, ptfhost, tbinfo, duthost, ptfadapt "SNAT all -- {} 0.0.0.0/0 mark match 0x2 to:{}".format(network_data.private_ip, network_data.public_ip)] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) # Remove with CLI crud_remove = {"remove": {"action": "remove", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip}} entries_table.update(crud_operations_basic(duthost, crud_remove)) @@ -662,7 +662,7 @@ def test_nat_static_iptables_add_remove(self, ptfhost, tbinfo, duthost, ptfadapt "postrouting": [] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) @pytest.mark.nat_static def test_nat_static_global_double_add(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, @@ -678,7 +678,7 @@ def 
test_nat_static_global_double_add(self, ptfhost, tbinfo, duthost, ptfadapter "postrouting": [] } pytest_assert(iptables_rules == iptables_output, - "Unexpected iptables output for nat table") + "Unexpected iptables output for nat table \n Got:\n{}\n Expected:\n{}".format(iptables_output, iptables_rules)) # Set NAT configuration for test network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type) apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type, @@ -991,8 +991,6 @@ def test_nat_static_redis_napt(self, ptfhost, tbinfo, duthost, ptfadapter, setup @pytest.mark.nat_static def test_nat_static_redis_asic(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, protocol_type): - sai_nat_src_id = {"TCP": 1, "UDP": 2}[protocol_type] - sai_nat_dst_id = {"TCP": 2, "UDP": 1}[protocol_type] interface_type, setup_info = setup_test_env setup_data = copy.deepcopy(setup_info) nat_type = 'static_napt' @@ -1012,8 +1010,11 @@ def test_nat_static_redis_asic(self, ptfhost, tbinfo, duthost, ptfadapter, setup db_rules_src = get_db_rules(duthost, ptfadapter, setup_test_env, protocol_type, 'ASIC_DB SRC') db_rules_dst = get_db_rules(duthost, ptfadapter, setup_test_env, protocol_type, 'ASIC_DB DST') output = get_redis_val(duthost, 1, "NAT_ENTRY") - output_src = output[(list(output.keys())[sai_nat_src_id])]['value'] - output_dst = output[(list(output.keys())[sai_nat_dst_id])]['value'] + for count, entry in enumerate(output): + if 'SAI_NAT_TYPE_SOURCE_NAT' in str(entry): + output_src = output[(list(output.keys())[count])]['value'] + if 'SAI_NAT_TYPE_DESTINATION_NAT"' in str(entry): + output_dst = output[(list(output.keys())[count])]['value'] pytest_assert(db_rules_src["SAI_NAT_ENTRY_ATTR_SRC_IP"] == output_src["SAI_NAT_ENTRY_ATTR_SRC_IP"], "Unexpected output \n Got:\n{}\n Expected:\n{}".format(output_src["SAI_NAT_ENTRY_ATTR_SRC_IP"], db_rules_src["SAI_NAT_ENTRY_ATTR_SRC_IP"])) 
pytest_assert(db_rules_src["SAI_NAT_ENTRY_ATTR_L4_SRC_PORT"] == output_src["SAI_NAT_ENTRY_ATTR_L4_SRC_PORT"], From 4ab629f2bab4eb5585eb15d44cd2866346f25d58 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Fri, 23 Jul 2021 09:28:23 +0800 Subject: [PATCH 008/117] [ptfadapter] Support two ptf port mapping mode (#3811) Approach What is the motivation for this PR? Enable testcases to use original ports for storage backend topologies. How did you do it? Since ptfadapter is a module-level fixture, we could modify its port mapping behavior by setting a test module global variable so that it could be parsed by ptfadapter fixture to determine its port choice. --- tests/common/plugins/ptfadapter/__init__.py | 24 +++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/common/plugins/ptfadapter/__init__.py b/tests/common/plugins/ptfadapter/__init__.py index eacc0193c30..0381eb09fb8 100644 --- a/tests/common/plugins/ptfadapter/__init__.py +++ b/tests/common/plugins/ptfadapter/__init__.py @@ -5,6 +5,8 @@ from ptfadapter import PtfTestAdapter import ptf.testutils +from tests.common import constants + DEFAULT_PTF_NN_PORT = 10900 DEFAULT_DEVICE_NUM = 0 @@ -72,7 +74,7 @@ def get_ifaces(netdev_output): return ifaces -def get_ifaces_map(ifaces): +def get_ifaces_map(ifaces, ptf_port_mapping_mode): """Get interface map.""" sub_ifaces = [] iface_map = {} @@ -85,10 +87,15 @@ def get_ifaces_map(ifaces): iface_index = int(iface_suffix) iface_map[iface_index] = iface - # override those interfaces that has sub interface - for i, si in sub_ifaces: - iface_map[i] = si - return iface_map + if ptf_port_mapping_mode == "use_sub_interface": + # override those interfaces that has sub interface + for i, si in sub_ifaces: + iface_map[i] = si + return iface_map + elif ptf_port_mapping_mode == "use_orig_interface": + return iface_map + else: + raise ValueError("Unsupported ptf port mapping mode: %s" % ptf_port_mapping_mode) 
@pytest.fixture(scope='module') @@ -100,11 +107,16 @@ def ptfadapter(ptfhost, tbinfo, request): however if something goes really wrong in one test module it is safer to restart PTF before proceeding running other test modules """ + # get ptf port mapping mode + if 'backend' in tbinfo['topo']['name']: + ptf_port_mapping_mode = getattr(request.module, "PTF_PORT_MAPPING_MODE", constants.PTF_PORT_MAPPING_MODE_DEFAULT) + else: + ptf_port_mapping_mode = 'use_orig_interface' # get the eth interfaces from PTF and initialize ifaces_map res = ptfhost.command('cat /proc/net/dev') ifaces = get_ifaces(res['stdout']) - ifaces_map = get_ifaces_map(ifaces) + ifaces_map = get_ifaces_map(ifaces, ptf_port_mapping_mode) # generate supervisor configuration for ptf_nn_agent ptfhost.host.options['variable_manager'].extra_vars.update({ From 8802c85e7df3f4900a582f2715aef5dc8f2d1c6c Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 22 Jul 2021 23:51:36 -0700 Subject: [PATCH 009/117] Cancel untar in collect_techsuport (#3880) Signed-off-by: bingwang --- tests/conftest.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index e09e8ff3456..70fde19df46 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,6 @@ import os import glob import json -import tarfile import logging import getpass import random @@ -553,10 +552,6 @@ def collect_techsupport_on_dut(request, a_dut): res = a_dut.shell("generate_dump -s \"-2 hours\"") fname = res['stdout_lines'][-1] a_dut.fetch(src=fname, dest="logs/{}".format(testname)) - tar = tarfile.open("logs/{}/{}/{}".format(testname, a_dut.hostname, fname)) - for m in tar.getmembers(): - if m.isfile(): - tar.extract(m, path="logs/{}/{}/".format(testname, a_dut.hostname)) logging.info("########### Collected tech support for test {} ###########".format(testname)) From 6baabff8a0d961959c4b20b094dc29674dc40730 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu 
<35479537+lolyu@users.noreply.github.com> Date: Sat, 24 Jul 2021 12:05:55 +0800 Subject: [PATCH 010/117] [test_pfcwd_function] Enable storage backend topologies (#3820) Approach What is the motivation for this PR? Enable test_pfcwd_function over storage backend topologies. How did you do it? Support parsing sub-interfaces to get test IO ports. Enable ptftest pfc_wd.py to send VLAN tagged packets with priority. Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you verify/test it? run test_pfcwd_function over t0-backend, t1-backend and other topos. --- ansible/roles/test/files/ptftests/pfc_wd.py | 97 +++++++++++++-------- tests/pfcwd/conftest.py | 6 +- tests/pfcwd/files/pfcwd_helper.py | 79 ++++++++++++++++- tests/pfcwd/test_pfcwd_function.py | 34 +++++++- 4 files changed, 172 insertions(+), 44 deletions(-) diff --git a/ansible/roles/test/files/ptftests/pfc_wd.py b/ansible/roles/test/files/ptftests/pfc_wd.py index 55c2d487652..42e7234cfa1 100644 --- a/ansible/roles/test/files/ptftests/pfc_wd.py +++ b/ansible/roles/test/files/ptftests/pfc_wd.py @@ -32,6 +32,8 @@ def setUp(self): self.ip_dst = self.test_params['ip_dst'] self.port_type = self.test_params['port_type'] self.wd_action = self.test_params.get('wd_action', 'drop') + self.port_src_vlan_id = self.test_params.get('port_src_vlan_id') + self.port_dst_vlan_id = self.test_params.get('port_dst_vlan_id') def runTest(self): ecn = 1 @@ -45,7 +47,7 @@ def runTest(self): for match in matches: for port in match.split(): dst_port_list.append(int(port)) - src_mac = self.dataplane.get_mac(0, 0) + src_mac = self.dataplane.get_mac(*random.choice(self.dataplane.ports.keys())) if self.port_type == "portchannel": for x in range(0, self.pkt_count): @@ -61,23 +63,35 @@ def runTest(self): ip_src =ipaddress.IPv4Address(unicode(ip_src,'utf-8')) ip_src = str(ip_src) - pkt = simple_tcp_packet( - eth_dst=self.router_mac, - eth_src=src_mac, - ip_src=ip_src, - ip_dst=self.ip_dst, - ip_tos = tos, - tcp_sport=sport, - tcp_dport=dport, - 
ip_ttl=64) - exp_pkt = simple_tcp_packet( - eth_src=self.router_mac, - ip_src=ip_src, - ip_dst=self.ip_dst, - ip_tos = tos, - tcp_sport=sport, - tcp_dport=dport, - ip_ttl=63) + pkt_args = { + 'eth_dst': self.router_mac, + 'eth_src': src_mac, + 'ip_src': ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos, + 'tcp_sport': sport, + 'tcp_dport': dport, + 'ip_ttl': 64 + } + if self.port_src_vlan_id is not None: + pkt_args['dl_vlan_enable'] = True + pkt_args['vlan_vid'] = int(self.port_src_vlan_id) + pkt_args['vlan_pcp'] = self.queue_index + pkt = simple_tcp_packet(**pkt_args) + exp_pkt_args = { + 'eth_src': self.router_mac, + 'ip_src': ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos, + 'tcp_sport': sport, + 'tcp_dport': dport, + 'ip_ttl': 63 + } + if self.port_dst_vlan_id is not None: + exp_pkt_args['dl_vlan_enable'] = True + exp_pkt_args['vlan_vid'] = int(self.port_dst_vlan_id) + exp_pkt_args['vlan_pcp'] = self.queue_index + exp_pkt = simple_tcp_packet(**exp_pkt_args) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") @@ -91,23 +105,35 @@ def runTest(self): dport = random.randint(0, 65535) ip_src = "1.1.1.1" - pkt = simple_tcp_packet( - eth_dst=self.router_mac, - eth_src=src_mac, - ip_src=ip_src, - ip_dst=self.ip_dst, - ip_tos = tos, - tcp_sport=sport, - tcp_dport=dport, - ip_ttl=64) - exp_pkt = simple_tcp_packet( - eth_src=self.router_mac, - ip_src=ip_src, - ip_dst=self.ip_dst, - ip_tos = tos, - tcp_sport=sport, - tcp_dport=dport, - ip_ttl=63) + pkt_args = { + 'eth_dst': self.router_mac, + 'eth_src': src_mac, + 'ip_src': ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos, + 'tcp_sport': sport, + 'tcp_dport': dport, + 'ip_ttl': 64 + } + if self.port_src_vlan_id is not None: + pkt_args['dl_vlan_enable'] = True + pkt_args['vlan_vid'] = int(self.port_src_vlan_id) + pkt_args['vlan_pcp'] = self.queue_index + pkt = simple_tcp_packet(**pkt_args) + exp_pkt_args = { + 'eth_src': 
self.router_mac, + 'ip_src': ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos, + 'tcp_sport': sport, + 'tcp_dport': dport, + 'ip_ttl': 63 + } + if self.port_dst_vlan_id is not None: + exp_pkt_args['dl_vlan_enable'] = True + exp_pkt_args['vlan_vid'] = int(self.port_dst_vlan_id) + exp_pkt_args['vlan_pcp'] = self.queue_index + exp_pkt = simple_tcp_packet(**exp_pkt_args) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") @@ -116,7 +142,6 @@ def runTest(self): masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len") send_packet(self, self.port_src, pkt, self.pkt_count) - if self.wd_action == 'drop': return verify_no_packet_any(self, masked_exp_pkt, dst_port_list) elif self.wd_action == 'forward': diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py index 564cce701ac..c6b5cbd4778 100644 --- a/tests/pfcwd/conftest.py +++ b/tests/pfcwd/conftest.py @@ -2,8 +2,9 @@ import pytest from tests.common.fixtures.conn_graph_facts import conn_graph_facts -from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] -from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice from .files.pfcwd_helper import TrafficPorts, set_pfc_timers, select_test_ports @@ -162,7 +163,6 @@ def setup_pfc_test( # set poll interval duthost.command("pfcwd interval {}".format(setup_info['pfc_timers']['pfc_wd_poll_time'])) - yield setup_info logger.info("--- Starting Pfcwd ---") diff --git a/tests/pfcwd/files/pfcwd_helper.py b/tests/pfcwd/files/pfcwd_helper.py index 
261bc135881..c35838e089d 100644 --- a/tests/pfcwd/files/pfcwd_helper.py +++ b/tests/pfcwd/files/pfcwd_helper.py @@ -1,6 +1,9 @@ import datetime import ipaddress +from tests.common import constants + + class TrafficPorts(object): """ Generate a list of ports needed for the PFC Watchdog test""" def __init__(self, mg_facts, neighbors, vlan_nw): @@ -35,6 +38,8 @@ def build_port_list(self): self.parse_intf_list() elif self.mg_facts['minigraph_portchannels']: self.parse_pc_list() + elif 'minigraph_vlan_sub_interfaces' in self.mg_facts: + self.parse_vlan_sub_interface_list() if self.mg_facts['minigraph_vlans']: self.test_ports.update(self.parse_vlan_list()) return self.test_ports @@ -183,19 +188,87 @@ def parse_vlan_list(self): temp_ports (dict): port info constructed from the vlan interfaces """ temp_ports = dict() - vlan_members = self.vlan_info[self.vlan_info.keys()[0]]['members'] + vlan_details = self.vlan_info.values()[0] + vlan_members = vlan_details['members'] + vlan_type = vlan_details.get('type') + vlan_id = vlan_details['vlanid'] + rx_port = self.pfc_wd_rx_port if isinstance(self.pfc_wd_rx_port, list) else [self.pfc_wd_rx_port] + rx_port_id = self.pfc_wd_rx_port_id if isinstance(self.pfc_wd_rx_port_id, list) else [self.pfc_wd_rx_port_id] for item in vlan_members: temp_ports[item] = {'test_neighbor_addr': self.vlan_nw, - 'rx_port': self.pfc_wd_rx_port, + 'rx_port': rx_port, 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, 'peer_device': self.neighbors[item]['peerdevice'], 'test_port_id': self.port_idx_info[item], - 'rx_port_id': self.pfc_wd_rx_port_id, + 'rx_port_id': rx_port_id, 'test_port_type': 'vlan' } + if hasattr(self, 'pfc_wd_rx_port_vlan_id'): + temp_ports[item]['rx_port_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if vlan_type is not None and vlan_type == 'Tagged': + temp_ports[item]['test_port_vlan_id'] = vlan_id return temp_ports + def parse_vlan_sub_interface_list(self): + """Build the port info from the vlan sub-interfaces.""" + pfc_wd_test_port = None 
+ first_pair = False + for sub_intf in self.mg_facts['minigraph_vlan_sub_interfaces']: + if ipaddress.ip_address(unicode(sub_intf['addr'])).version != 4: + continue + intf_name, vlan_id = sub_intf['attachto'].split(constants.VLAN_SUB_INTERFACE_SEPARATOR) + # first port + if not self.pfc_wd_rx_port: + self.pfc_wd_rx_port = intf_name + self.pfc_wd_rx_port_addr = sub_intf['addr'] + self.pfc_wd_rx_port_id = self.port_idx_info[self.pfc_wd_rx_port] + self.pfc_wd_rx_port_vlan_id = vlan_id + elif not pfc_wd_test_port: + # second port + first_pair = True + + # populate info for all ports except the first one + if first_pair or pfc_wd_test_port: + pfc_wd_test_port = intf_name + pfc_wd_test_port_addr = sub_intf['addr'] + pfc_wd_test_port_id = self.port_idx_info[pfc_wd_test_port] + pfc_wd_test_neighbor_addr = None + + for item in self.bgp_info: + if ipaddress.ip_address(unicode(item['addr'])).version != 4: + continue + if not self.pfc_wd_rx_neighbor_addr and item['peer_addr'] == self.pfc_wd_rx_port_addr: + self.pfc_wd_rx_neighbor_addr = item['addr'] + if item['peer_addr'] == pfc_wd_test_port_addr: + pfc_wd_test_neighbor_addr = item['addr'] + + self.test_ports[pfc_wd_test_port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, + 'rx_port': [self.pfc_wd_rx_port], + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors[pfc_wd_test_port]['peerdevice'], + 'test_port_id': pfc_wd_test_port_id, + 'rx_port_id': [self.pfc_wd_rx_port_id], + 'rx_port_vlan_id': self.pfc_wd_rx_port_vlan_id, + 'test_port_vlan_id': vlan_id, + 'test_port_type': 'interface' + } + # populate info for the first port + if first_pair: + self.test_ports[self.pfc_wd_rx_port] = {'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'rx_port': [pfc_wd_test_port], + 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, + 'peer_device': self.neighbors[self.pfc_wd_rx_port]['peerdevice'], + 'test_port_id': self.pfc_wd_rx_port_id, + 'rx_port_id': [pfc_wd_test_port_id], + 'rx_port_vlan_id': vlan_id, + 
'test_port_vlan_id': self.pfc_wd_rx_port_vlan_id, + 'test_port_type': 'interface' + } + + first_pair = False + + def set_pfc_timers(): """ Set PFC timers diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py index 07445426b81..e496f20000c 100644 --- a/tests/pfcwd/test_pfcwd_function.py +++ b/tests/pfcwd/test_pfcwd_function.py @@ -11,6 +11,10 @@ from .files.pfcwd_helper import start_wd_on_ports from tests.ptf_runner import ptf_runner from tests.common import port_toggle +from tests.common import constants + + +PTF_PORT_MAPPING_MODE = 'use_orig_interface' TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates") EXPECT_PFC_WD_DETECT_RE = ".* detected PFC storm .*" @@ -293,6 +297,8 @@ def setup_port_params(self, port, init=False): self.pfc_wd['test_port_ids'] = self.ports[port]['test_portchannel_members'] elif self.pfc_wd['port_type'] in ["vlan", "interface"]: self.pfc_wd['test_port_ids'] = self.pfc_wd['test_port_id'] + self.pfc_wd['test_port_vlan_id'] = self.ports[port].get('test_port_vlan_id') + self.pfc_wd['rx_port_vlan_id'] = self.ports[port].get('rx_port_vlan_id') self.queue_oid = self.dut.get_queue_oid(port, self.pfc_wd['queue_index']) def update_queue(self, port): @@ -349,8 +355,10 @@ def resolve_arp(self, vlan): """ if self.pfc_wd['port_type'] == "vlan": self.ptf.script("./scripts/remove_ip.sh") - self.ptf.command("ifconfig eth{} {}".format(self.pfc_wd['test_port_id'], - self.pfc_wd['test_neighbor_addr'])) + ptf_port = 'eth%s' % self.pfc_wd['test_port_id'] + if self.pfc_wd['test_port_vlan_id'] is not None: + ptf_port += (constants.VLAN_SUB_INTERFACE_SEPARATOR + self.pfc_wd['test_port_vlan_id']) + self.ptf.command("ifconfig {} {}".format(ptf_port, self.pfc_wd['test_neighbor_addr'])) self.ptf.command("ping {} -c 10".format(vlan['addr'])) self.dut.command("docker exec -i swss arping {} -c 5".format(self.pfc_wd['test_neighbor_addr'])) @@ -409,6 +417,8 @@ def __init__(self, ptf, router_mac, pfc_params): 
self.pfc_wd_test_port_ids = pfc_params['test_port_ids'] self.pfc_wd_test_neighbor_addr = pfc_params['test_neighbor_addr'] self.pfc_wd_rx_neighbor_addr = pfc_params['rx_neighbor_addr'] + self.pfc_wd_test_port_vlan_id = pfc_params['test_port_vlan_id'] + self.pfc_wd_rx_port_vlan_id = pfc_params['rx_port_vlan_id'] self.port_type = pfc_params['port_type'] def verify_tx_egress(self, action): @@ -431,6 +441,10 @@ def verify_tx_egress(self, action): 'ip_dst': self.pfc_wd_test_neighbor_addr, 'port_type': self.port_type, 'wd_action': action} + if self.pfc_wd_rx_port_vlan_id is not None: + ptf_params['port_src_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if self.pfc_wd_test_port_vlan_id is not None: + ptf_params['port_dst_vlan_id'] = self.pfc_wd_test_port_vlan_id log_format = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") log_file = "/tmp/pfc_wd.PfcWdTest.{}.log".format(log_format) ptf_runner(self.ptf, "ptftests", "pfc_wd.PfcWdTest", "ptftests", params=ptf_params, @@ -457,6 +471,10 @@ def verify_rx_ingress(self, action): 'ip_dst': self.pfc_wd_rx_neighbor_addr, 'port_type': self.port_type, 'wd_action': action} + if self.pfc_wd_rx_port_vlan_id is not None: + ptf_params['port_dst_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if self.pfc_wd_test_port_vlan_id is not None: + ptf_params['port_src_vlan_id'] = self.pfc_wd_test_port_vlan_id log_format = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") log_file = "/tmp/pfc_wd.PfcWdTest.{}.log".format(log_format) ptf_runner(self.ptf, "ptftests", "pfc_wd.PfcWdTest", "ptftests", params=ptf_params, @@ -485,6 +503,10 @@ def verify_other_pfc_queue(self): 'ip_dst': self.pfc_wd_test_neighbor_addr, 'port_type': self.port_type, 'wd_action': 'forward'} + if self.pfc_wd_rx_port_vlan_id is not None: + ptf_params['port_src_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if self.pfc_wd_test_port_vlan_id is not None: + ptf_params['port_dst_vlan_id'] = self.pfc_wd_test_port_vlan_id log_format = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") 
log_file = "/tmp/pfc_wd.PfcWdTest.{}.log".format(log_format) ptf_runner(self.ptf, "ptftests", "pfc_wd.PfcWdTest", "ptftests", params=ptf_params, @@ -513,6 +535,10 @@ def verify_other_pfc_pg(self): 'ip_dst': self.pfc_wd_rx_neighbor_addr, 'port_type': self.port_type, 'wd_action': 'forward'} + if self.pfc_wd_rx_port_vlan_id is not None: + ptf_params['port_dst_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if self.pfc_wd_test_port_vlan_id is not None: + ptf_params['port_src_vlan_id'] = self.pfc_wd_test_port_vlan_id log_format = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") log_file = "/tmp/pfc_wd.PfcWdTest.{}.log".format(log_format) ptf_runner(self.ptf, "ptftests", "pfc_wd.PfcWdTest", "ptftests", params=ptf_params, @@ -531,6 +557,10 @@ def fill_buffer(self): 'ip_dst': self.pfc_wd_test_neighbor_addr, 'port_type': self.port_type, 'wd_action': 'dontcare'} + if self.pfc_wd_rx_port_vlan_id is not None: + ptf_params['port_src_vlan_id'] = self.pfc_wd_rx_port_vlan_id + if self.pfc_wd_test_port_vlan_id is not None: + ptf_params['port_dst_vlan_id'] = self.pfc_wd_test_port_vlan_id log_format = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") log_file = "/tmp/pfc_wd.PfcWdTest.{}.log".format(log_format) ptf_runner(self.ptf, "ptftests", "pfc_wd.PfcWdTest", "ptftests", params=ptf_params, From 0547e206b591354d75757b6576b5a254c613e6d3 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Sun, 25 Jul 2021 02:49:09 +0300 Subject: [PATCH 011/117] [test_ro_user] improve the delay for 'test_ro_user_banned_command' due to hostcfgd delay on boot (#3775) What is the motivation for this PR? After recent change introduced on this PR's: Azure/sonic-buildimage#7965 Azure/sonic-buildimage#8117 'hostcfgd' will be delayed in 90 seconds. If the test will run before the daemon has started, it will fail the test. This is to align with the new change and make sure the test will pass. 
Profiling the time it takes to configure tacacs after the daemon started can take time: main started -> Mon 12 Jul 2021 02:07:06 PM UTC 'tacacs_server_update' function finished -> Mon 12 Jul 2021 02:08:10 PM UTC Signed-off-by: Shlomi Bitton --- tests/tacacs/test_ro_user.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py index 1f590889e58..b3dfafcf64f 100644 --- a/tests/tacacs/test_ro_user.py +++ b/tests/tacacs/test_ro_user.py @@ -10,6 +10,9 @@ logger = logging.getLogger(__name__) +SLEEP_TIME = 10 +TIMEOUT_LIMIT = 120 + def ssh_remote_run(localhost, remote_ip, username, password, cmd): res = localhost.shell("sshpass -p {} ssh "\ "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "\ @@ -49,6 +52,24 @@ def ssh_remote_ban_run(localhost, remote_ip, username, password, cmd): logger.info("check command \"{}\" rc={}".format(cmd, res['rc'])) return res['rc'] != 0 and "Make sure your account has RW permission to current device" in res['stderr'] +def wait_for_tacacs(localhost, remote_ip, username, password): + current_attempt = 0 + cmd = 'systemctl status hostcfgd.service' + while (True): + # Wait for tacacs to finish configuration from hostcfgd + logger.info("Check if hostcfgd started and configured tacac attempt = {}".format(current_attempt)) + time.sleep(SLEEP_TIME) + output = localhost.shell("sshpass -p {} ssh "\ + "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "\ + "{}@{} {}".format( + password, username, remote_ip, cmd), module_ignore_errors=True)['stdout_lines'] + if "active (running)" in str(output): + return + else: + if current_attempt >= TIMEOUT_LIMIT/SLEEP_TIME: + pytest_assert(False, "hostcfgd did not start after {} seconds".format(TIMEOUT_LIMIT)) + else: + current_attempt += 1 def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] @@ 
-153,7 +174,8 @@ def test_ro_user_banned_command(localhost, duthosts, enum_rand_one_per_hwsku_hos ] # Wait until hostcfgd started and configured tacas authorization - time.sleep(100) + wait_for_tacacs(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], creds_all_duts[duthost]['tacacs_ro_user_passwd']) + for command in commands: banned = ssh_remote_ban_run(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], creds_all_duts[duthost]['tacacs_ro_user_passwd'], command) From 6feb92586a255c0c60bbd02025e207cbf422784a Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sun, 25 Jul 2021 20:37:49 -0700 Subject: [PATCH 012/117] Add docker login for restart-ptf (#3884) Signed-off-by: bingwang --- ansible/roles/vm_set/tasks/renumber_topo.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index 64c56b0884e..2d0196c8542 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -21,6 +21,14 @@ state: absent become: yes + - name: Try to login into docker registry + docker_login: + registry_url: "{{ docker_registry_host }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + become: yes + when: docker_registry_username is defined and docker_registry_password is defined + - name: Create ptf container ptf_{{ vm_set_name }} docker_container: name: ptf_{{ vm_set_name }} From d8a3e87fd880ef46f1d560a97e53fe1945619686 Mon Sep 17 00:00:00 2001 From: Mahesh Maddikayala <10645050+smaheshm@users.noreply.github.com> Date: Mon, 26 Jul 2021 13:28:15 -0700 Subject: [PATCH 013/117] [autorestart][Multi ASIC] support autorestart test for multi ASIC platform (#3696) * For multi ASIC platform, added container names in meta data file for frontend and backend ASIC on which autorestart test will be performed. 
--- .../autorestart/test_container_autorestart.py | 64 +++++++++-------- tests/common/devices/multi_asic.py | 69 +++++++++++++++++-- tests/common/devices/sonic.py | 34 +-------- tests/common/devices/sonic_asic.py | 9 ++- tests/conftest.py | 50 ++++++++++---- tests/test_pretest.py | 39 ++++++++--- 6 files changed, 173 insertions(+), 92 deletions(-) diff --git a/tests/autorestart/test_container_autorestart.py b/tests/autorestart/test_container_autorestart.py index a5a7fd9accc..e52009d767d 100755 --- a/tests/autorestart/test_container_autorestart.py +++ b/tests/autorestart/test_container_autorestart.py @@ -2,6 +2,7 @@ Test the auto-restart feature of containers """ import logging +import re from collections import defaultdict import pytest @@ -22,6 +23,7 @@ CONTAINER_CHECK_INTERVAL_SECS = 1 CONTAINER_STOP_THRESHOLD_SECS = 30 CONTAINER_RESTART_THRESHOLD_SECS = 180 +CONTAINER_NAME_REGEX = (r"([a-zA-Z_]+)(\d*)$") @pytest.fixture(autouse=True, scope='module') def config_reload_after_tests(duthost): @@ -29,7 +31,7 @@ def config_reload_after_tests(duthost): config_reload(duthost) @pytest.fixture(autouse=True) -def ignore_expected_loganalyzer_exception(duthost, loganalyzer, enum_dut_feature): +def ignore_expected_loganalyzer_exception(duthost, loganalyzer, enum_dut_feature_container): """ Ignore expected failure/error messages during testing the autorestart feature. 
@@ -61,19 +63,19 @@ def ignore_expected_loganalyzer_exception(duthost, loganalyzer, enum_dut_feature """ swss_syncd_teamd_regex = [ - ".*ERR swss#orchagent.*removeLag.*", - ".*ERR syncd#syncd.*driverEgressMemoryUpdate.*", - ".*ERR syncd#syncd.*brcm_sai*", - ".*ERR syncd#syncd.*SAI_API_UNSPECIFIED:sai_api_query.*", - ".*ERR syncd#syncd.*SAI_API_SWITCH:sai_query_attribute_enum_values_capability.*", - ".*ERR syncd#syncd.*SAI_API_SWITCH:sai_object_type_get_availability.*", - ".*ERR syncd#syncd.*sendApiResponse: api SAI_COMMON_API_SET failed in syncd mode.*", - ".*ERR syncd#syncd.*processQuadEvent.*", - ".*WARNING syncd#syncd.*skipping since it causes crash.*", - ".*ERR swss#portsyncd.*readData.*netlink reports an error=-33 on reading a netlink socket.*", - ".*ERR teamd#teamsyncd.*readData.*netlink reports an error=-33 on reading a netlink socket.*", - ".*ERR swss#orchagent.*set status: SAI_STATUS_ATTR_NOT_IMPLEMENTED_0.*", - ".*ERR swss#orchagent.*setIntfVlanFloodType.*", + ".*ERR swss[0-9]*#orchagent.*removeLag.*", + ".*ERR syncd[0-9]*#syncd.*driverEgressMemoryUpdate.*", + ".*ERR syncd[0-9]*#syncd.*brcm_sai*", + ".*ERR syncd[0-9]*#syncd.*SAI_API_UNSPECIFIED:sai_api_query.*", + ".*ERR syncd[0-9]*#syncd.*SAI_API_SWITCH:sai_query_attribute_enum_values_capability.*", + ".*ERR syncd[0-9]*#syncd.*SAI_API_SWITCH:sai_object_type_get_availability.*", + ".*ERR syncd[0-9]*#syncd.*sendApiResponse: api SAI_COMMON_API_SET failed in syncd mode.*", + ".*ERR syncd[0-9]*#syncd.*processQuadEvent.*", + ".*WARNING syncd[0-9]*#syncd.*skipping since it causes crash.*", + ".*ERR swss[0-9]*#portsyncd.*readData.*netlink reports an error=-33 on reading a netlink socket.*", + ".*ERR teamd[0-9]*#teamsyncd.*readData.*netlink reports an error=-33 on reading a netlink socket.*", + ".*ERR swss[0-9]*#orchagent.*set status: SAI_STATUS_ATTR_NOT_IMPLEMENTED_0.*", + ".*ERR swss[0-9]*#orchagent.*setIntfVlanFloodType.*", ".*ERR snmp#snmpd.*", ] ignore_regex_dict = { @@ -93,7 +95,8 @@ def 
ignore_expected_loganalyzer_exception(duthost, loganalyzer, enum_dut_feature 'teamd' : swss_syncd_teamd_regex, } - _, feature = decode_dut_port_name(enum_dut_feature) + _, container_name = decode_dut_port_name(enum_dut_feature_container) + feature = re.match(CONTAINER_NAME_REGEX, container_name).group(1) if loganalyzer: loganalyzer[duthost.hostname].ignore_regex.extend(ignore_regex_dict['common']) @@ -307,9 +310,12 @@ def run_test_on_single_container(duthost, container_name, tbinfo): if tbinfo["topo"]["type"] != "t0": skip_condition.append("radv") + # bgp0 -> bgp, bgp -> bgp + feature_name = re.match(CONTAINER_NAME_REGEX, container_name).group(1) + # Skip testing the database container, radv container on T1 devices and containers/services which are disabled - pytest_require(container_name not in skip_condition, - "Skipping test for container {}".format(container_name)) + pytest_require(feature_name not in skip_condition, + "Skipping test for container {}".format(feature_name)) is_running = is_container_running(duthost, container_name) pytest_assert(is_running, "Container '{}' is not running. 
Exiting...".format(container_name)) @@ -320,9 +326,9 @@ def run_test_on_single_container(duthost, container_name, tbinfo): logger.info("Start testing the container '{}'...".format(container_name)) restore_disabled_state = False - if container_autorestart_states[container_name] == "disabled": + if container_autorestart_states[feature_name] == "disabled": logger.info("Change auto-restart state of container '{}' to be 'enabled'".format(container_name)) - duthost.shell("sudo config feature autorestart {} enabled".format(container_name)) + duthost.shell("sudo config feature autorestart {} enabled".format(feature_name)) restore_disabled_state = True # Currently we select 'rsyslogd' as non-critical processes for testing based on @@ -338,7 +344,7 @@ def run_test_on_single_container(duthost, container_name, tbinfo): for critical_process in critical_process_list: # Skip 'dsserve' process since it was not managed by supervisord # TODO: Should remove the following two lines once the issue was solved in the image. 
- if container_name == "syncd" and critical_process == "dsserve": + if feature_name == "syncd" and critical_process == "dsserve": continue program_status, program_pid = get_program_info(duthost, container_name, critical_process) @@ -364,7 +370,7 @@ def run_test_on_single_container(duthost, container_name, tbinfo): if restore_disabled_state: logger.info("Restore auto-restart state of container '{}' to 'disabled'".format(container_name)) - duthost.shell("sudo config feature autorestart {} disabled".format(container_name)) + duthost.shell("sudo config feature autorestart {} disabled".format(feature_name)) if not postcheck_critical_processes_status(duthost, container_autorestart_states, up_bgp_neighbors): config_reload(duthost) @@ -373,17 +379,21 @@ def run_test_on_single_container(duthost, container_name, tbinfo): logger.info("End of testing the container '{}'".format(container_name)) -def test_containers_autorestart(duthosts, enum_dut_feature, enum_rand_one_per_hwsku_frontend_hostname, tbinfo): +def test_containers_autorestart( + duthosts, enum_dut_feature_container, + enum_rand_one_per_hwsku_frontend_hostname, tbinfo +): """ @summary: Test the auto-restart feature of each container against two scenarios: killing a non-critical process to verify the container is still running; killing each critical process to verify the container will be stopped and restarted """ - dut_name, feature = decode_dut_port_name(enum_dut_feature) - pytest_require(dut_name == enum_rand_one_per_hwsku_frontend_hostname and feature != "unknown", - "Skip test on dut host {} (chosen {}) feature {}" - .format(dut_name, enum_rand_one_per_hwsku_frontend_hostname, feature)) + dut_name, feature = decode_dut_port_name(enum_dut_feature_container) + pytest_require( + dut_name == enum_rand_one_per_hwsku_frontend_hostname and feature != "unknown", + "Skip test on dut host {} (chosen {}) feature {}" + .format(dut_name, enum_rand_one_per_hwsku_frontend_hostname, feature) + ) duthost = duthosts[dut_name] 
run_test_on_single_container(duthost, feature, tbinfo) - diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index 4eda547b21a..155e80403a8 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -186,7 +186,7 @@ def get_vtysh_cmd_for_namespace(self, cmd, namespace): return cmd ns_cmd = cmd.replace('vtysh', 'vtysh -n {}'.format(asic_id)) return ns_cmd - + def get_linux_ip_cmd_for_namespace(self, cmd, namespace): if not namespace: return cmd @@ -350,8 +350,8 @@ def get_queue_oid(self, port, queue_num): def has_config_subcommand(self, command): """ Check if a config/show subcommand exists on the device - - It is up to the caller of the function to ensure that `command` + + It is up to the caller of the function to ensure that `command` does not have any unintended side effects when run Args: @@ -360,9 +360,70 @@ def has_config_subcommand(self, command): (bool) True if the command exists, false otherwise """ try: - self.shell(command) + self.shell(command) # If the command executes successfully, we can assume it exists return True except RunAnsibleModuleFail as e: # If 'No such command' is found in stderr, the command doesn't exist return 'No such command' not in e.results['stderr'] + + def disable_syslog_rate_limit(self, feature): + """ + Disable Rate limit for a given service + """ + services = [feature] + + if (feature in self.sonichost.DEFAULT_ASIC_SERVICES): + services = [asic.get_docker_name(feature) for asic in self.asics] + + for docker in services: + cmd_disable_rate_limit = ( + r"docker exec -i {} sed -i " + r"'s/^\$SystemLogRateLimit/#\$SystemLogRateLimit/g' " + r"/etc/rsyslog.conf" + ) + cmd_reload = r"docker exec -i {} supervisorctl restart rsyslogd" + cmds = [] + cmds.append(cmd_disable_rate_limit.format(docker)) + cmds.append(cmd_reload.format(docker)) + self.sonichost.shell_cmds(cmds=cmds) + + def get_bgp_neighbors(self): + """ + Get a diction of BGP neighbor states + + Args: None + + 
Returns: dictionary { (neighbor_ip : info_dict)* } + + """ + bgp_neigh = {} + for asic in self.asics: + bgp_info = asic.bgp_facts() + bgp_neigh.update(bgp_info["ansible_facts"]["bgp_neighbors"]) + + return bgp_neigh + + def check_bgp_session_state(self, neigh_ips, state="established"): + """ + @summary: check if current bgp session equals to the target state + + @param neigh_ips: bgp neighbor IPs + @param state: target state + """ + neigh_ips = [ip.lower() for ip in neigh_ips] + neigh_ok = [] + + for asic in self.asics: + bgp_facts = asic.bgp_facts()['ansible_facts'] + logging.info("bgp_facts: {}".format(bgp_facts)) + for k, v in bgp_facts['bgp_neighbors'].items(): + if v['state'] == state: + if k.lower() in neigh_ips: + neigh_ok.append(k) + logging.info("bgp neighbors that match the state: {}".format(neigh_ok)) + + if len(neigh_ips) == len(neigh_ok): + return True + + return False diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 7e04279a45b..57ba5c621a2 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -29,6 +29,7 @@ class SonicHost(AnsibleHostBase): This type of host contains information about the SONiC device (device info, services, etc.), and also provides the ability to run Ansible modules on the SONiC device. 
""" + DEFAULT_ASIC_SERVICES = ["bgp", "database", "lldp", "swss", "syncd", "teamd"] def __init__(self, ansible_adhoc, hostname, @@ -944,39 +945,6 @@ def get_bgp_neighbor_info(self, neighbor_ip): return nbinfo[str(neighbor_ip)] - def get_bgp_neighbors(self): - """ - Get a diction of BGP neighbor states - - Args: None - - Returns: dictionary { (neighbor_ip : info_dict)* } - - """ - bgp_facts = self.bgp_facts()['ansible_facts'] - return bgp_facts['bgp_neighbors'] - - def check_bgp_session_state(self, neigh_ips, state="established"): - """ - @summary: check if current bgp session equals to the target state - - @param neigh_ips: bgp neighbor IPs - @param state: target state - """ - neigh_ips = [ip.lower() for ip in neigh_ips] - neigh_ok = [] - bgp_facts = self.bgp_facts()['ansible_facts'] - logging.info("bgp_facts: {}".format(bgp_facts)) - for k, v in bgp_facts['bgp_neighbors'].items(): - if v['state'] == state: - if k.lower() in neigh_ips: - neigh_ok.append(k) - logging.info("bgp neighbors that match the state: {}".format(neigh_ok)) - if len(neigh_ips) == len(neigh_ok): - return True - - return False - def check_bgp_session_nsf(self, neighbor_ip): """ @summary: check if bgp neighbor session enters NSF state or not diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py index 088b759fbff..a9d4d8bf0a1 100644 --- a/tests/common/devices/sonic_asic.py +++ b/tests/common/devices/sonic_asic.py @@ -15,7 +15,6 @@ class SonicAsic(object): For example, passing asic_id, namespace, instance_id etc. to ansible module to deal with namespaces. 
""" - _DEFAULT_ASIC_SERVICES = ["bgp", "database", "lldp", "swss", "syncd", "teamd"] _MULTI_ASIC_SERVICE_NAME = "{}@{}" # service name, asic_id _MULTI_ASIC_DOCKER_NAME = "{}{}" # docker name, asic_id @@ -48,12 +47,12 @@ def get_critical_services(self): for the namespace(asic) If the dut is multi asic, then the asic_id is appended t0 the - _DEFAULT_ASIC_SERVICES list + self.sonichost.DEFAULT_ASIC_SERVICES list Returns: [list]: list of the services running the namespace/asic """ a_service = [] - for service in self._DEFAULT_ASIC_SERVICES: + for service in self.sonichost.DEFAULT_ASIC_SERVICES: a_service.append("{}{}".format( service, self.asic_index if self.sonichost.is_multi_asic else "")) return a_service @@ -174,7 +173,7 @@ def interface_facts(self, *module_args, **complex_args): def get_service_name(self, service): if (not self.sonichost.is_multi_asic or - service not in self._DEFAULT_ASIC_SERVICES + service not in self.sonichost.DEFAULT_ASIC_SERVICES ): return service @@ -182,7 +181,7 @@ def get_service_name(self, service): def get_docker_name(self, service): if (not self.sonichost.is_multi_asic or - service not in self._DEFAULT_ASIC_SERVICES + service not in self.sonichost.DEFAULT_ASIC_SERVICES ): return service diff --git a/tests/conftest.py b/tests/conftest.py index 70fde19df46..7878527b85a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -293,6 +293,7 @@ def rand_one_dut_portname_oper_up(request): oper_up_ports = random.sample(oper_up_ports, 1) return oper_up_ports[0] + @pytest.fixture(scope="module") def rand_one_dut_lossless_prio(request): lossless_prio_list = generate_priority_lists(request, 'lossless') @@ -300,6 +301,7 @@ def rand_one_dut_lossless_prio(request): lossless_prio_list = random.sample(lossless_prio_list, 1) return lossless_prio_list[0] + @pytest.fixture(scope="module", autouse=True) def reset_critical_services_list(duthosts): """ @@ -308,6 +310,7 @@ def reset_critical_services_list(duthosts): """ 
[a_dut.critical_services_tracking_list() for a_dut in duthosts] + @pytest.fixture(scope="session") def localhost(ansible_adhoc): return Localhost(ansible_adhoc) @@ -325,6 +328,7 @@ def ptfhost(ansible_adhoc, tbinfo, duthost): ptf_host = duthost.host.options["inventory_manager"].get_host(duthost.hostname).get_vars()["ptf_host"] return PTFHost(ansible_adhoc, ptf_host) + @pytest.fixture(scope="module") def k8smasters(ansible_adhoc, request): """ @@ -353,6 +357,7 @@ def k8scluster(k8smasters): k8s_master_cluster = K8sMasterCluster(k8smasters) return k8s_master_cluster + @pytest.fixture(scope="module") def nbrhosts(ansible_adhoc, tbinfo, creds, request): """ @@ -381,6 +386,7 @@ def nbrhosts(ansible_adhoc, tbinfo, creds, request): raise ValueError("Unknown neighbor type %s" % (neighbor_type, )) return devices + @pytest.fixture(scope="module") def fanouthosts(ansible_adhoc, conn_graph_facts, creds): """ @@ -537,6 +543,7 @@ def pytest_runtest_makereport(item, call): setattr(item, "rep_" + rep.when, rep) + def fetch_dbs(duthost, testname): dbs = [[0, "appdb"], [1, "asicdb"], [2, "counterdb"], [4, "configdb"]] for db in dbs: @@ -555,6 +562,7 @@ def collect_techsupport_on_dut(request, a_dut): logging.info("########### Collected tech support for test {} ###########".format(testname)) + @pytest.fixture def collect_techsupport(request, duthosts, enum_dut_hostname): yield @@ -563,11 +571,13 @@ def collect_techsupport(request, duthosts, enum_dut_hostname): duthost = duthosts[enum_dut_hostname] collect_techsupport_on_dut(request, duthost) + @pytest.fixture def collect_techsupport_all_duts(request, duthosts): yield [collect_techsupport_on_dut(request, a_dut) for a_dut in duthosts] + @pytest.fixture(scope="session", autouse=True) def tag_test_report(request, pytestconfig, tbinfo, duthost, record_testsuite_property): if not request.config.getoption("--junit-xml"): @@ -884,18 +894,23 @@ def generate_port_lists(request, port_scope): return ret if ret else empty -def 
generate_dut_feature_list(request): +def generate_dut_feature_container_list(request): + """ + Generate list of containers given the list of features. + List of features and container names are both obtained from + metadata file + """ empty = [ encode_dut_port_name('unknown', 'unknown') ] tbname = request.config.getoption("--testbed") if not tbname: return empty - folder = 'metadata' - filepath = os.path.join(folder, tbname + '.json') + folder = "metadata" + filepath = os.path.join(folder, tbname + ".json") try: - with open(filepath, 'r') as yf: + with open(filepath, "r") as yf: metadata = json.load(yf) except IOError as e: return empty @@ -904,14 +919,23 @@ def generate_dut_feature_list(request): return empty meta = metadata[tbname] - ret = [] + container_list = [] + for dut, val in meta.items(): - if 'features' not in val: + if "features" not in val: continue - for feature, _ in val['features'].items(): - ret.append(encode_dut_port_name(dut, feature)) + for feature in val["features"].keys(): + dut_info = meta[dut] + services = dut_info["asic_services"].get(feature) + + if services is not None: + for service in services: + container_list.append(encode_dut_port_name(dut, service)) + else: + container_list.append(encode_dut_port_name(dut, feature)) + + return container_list - return ret if ret else empty def generate_priority_lists(request, prio_scope): empty = [] @@ -1021,10 +1045,10 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("enum_dut_portchannel_oper_up", generate_port_lists(metafunc, "oper_up_pcs")) if "enum_dut_portchannel_admin_up" in metafunc.fixturenames: metafunc.parametrize("enum_dut_portchannel_admin_up", generate_port_lists(metafunc, "admin_up_pcs")) - - if "enum_dut_feature" in metafunc.fixturenames: - metafunc.parametrize("enum_dut_feature", generate_dut_feature_list(metafunc)) - + if "enum_dut_feature_container" in metafunc.fixturenames: + metafunc.parametrize( + "enum_dut_feature_container", 
generate_dut_feature_container_list(metafunc) + ) if 'enum_dut_lossless_prio' in metafunc.fixturenames: metafunc.parametrize("enum_dut_lossless_prio", generate_priority_lists(metafunc, 'lossless')) if 'enum_dut_lossy_prio' in metafunc.fixturenames: diff --git a/tests/test_pretest.py b/tests/test_pretest.py index 68d3f005958..ac99a8491e5 100644 --- a/tests/test_pretest.py +++ b/tests/test_pretest.py @@ -1,8 +1,11 @@ -import pytest -import logging import json -import time +import logging import os +import pytest +import random +import time + +from collections import defaultdict from jinja2 import Template from common.helpers.assertions import pytest_require @@ -56,7 +59,28 @@ def test_disable_container_autorestart(duthosts, enum_dut_hostname, disable_cont def collect_dut_info(dut): status = dut.show_interface(command='status')['ansible_facts']['int_status'] features, _ = dut.get_feature_status() - return { 'intf_status' : status, 'features' : features } + + if dut.sonichost.is_multi_asic: + front_end_asics = dut.get_frontend_asic_ids() + back_end_asic = dut.get_backend_asic_ids() + + asic_services = defaultdict(list) + for service in dut.sonichost.DEFAULT_ASIC_SERVICES: + # for multi ASIC randomly select one frontend ASIC + # and one backend ASIC + if dut.sonichost.is_multi_asic: + fe = random.choice(front_end_asics) + be = random.choice(back_end_asic) + asic_services[service] = [ + dut.get_docker_name(service, asic_index=fe), + dut.get_docker_name(service, asic_index=be) + ] + + return { + "intf_status": status, + "features": features, + "asic_services": asic_services, + } def test_update_testbed_metadata(duthosts, tbinfo): @@ -88,15 +112,10 @@ def test_disable_rsyslog_rate_limit(duthosts, enum_dut_hostname): # We don't want to fail here because it's an util logging.warn("Failed to retrieve feature status") return - cmd_disable_rate_limit = r"docker exec -i {} sed -i 's/^\$SystemLogRateLimit/#\$SystemLogRateLimit/g' /etc/rsyslog.conf" - cmd_reload = r"docker exec -i 
{} supervisorctl restart rsyslogd" for feature_name, state in features_dict.items(): if 'enabled' not in state: continue - cmds = [] - cmds.append(cmd_disable_rate_limit.format(feature_name)) - cmds.append(cmd_reload.format(feature_name)) - duthost.shell_cmds(cmds=cmds) + duthost.disable_syslog_rate_limit(feature_name) def collect_dut_lossless_prio(dut): config_facts = dut.config_facts(host=dut.hostname, source="running")['ansible_facts'] From 589afb330c71cc2d796c12b0c5cffc1588273d32 Mon Sep 17 00:00:00 2001 From: yozhao101 <56170650+yozhao101@users.noreply.github.com> Date: Mon, 26 Jul 2021 18:29:21 -0700 Subject: [PATCH 014/117] [pytest] Fix the fast failure when checking the format of Monit alerting message (#3809) What is the motivation for this PR? This PR aims to fix the fast failure issue when checking the format of Monit alerting message. Initially check_monit_last_output(...) was called by wait_until(...) to continuously check whether the format of Monit alerting message was correct or not. If the Monit alerting message was not found, this script should return false instead of failing this test directly. How did you do it? I replaced the statement pytest.fail(...) in the function check_monit_last_output(...) with return False. At the same time, some logging statements are added in order to show what action was done during testing. How did you verify/test it? I test this change on the DuT str-msn2700-22. Any platform specific information? N/A Supported testbed topology if it's a new test case? 
N/A --- tests/monit/test_monit_status.py | 54 +++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/tests/monit/test_monit_status.py b/tests/monit/test_monit_status.py index 11bee08cd27..60c3dcb407d 100644 --- a/tests/monit/test_monit_status.py +++ b/tests/monit/test_monit_status.py @@ -19,7 +19,7 @@ @pytest.fixture -def disable_lldp(duthosts, enum_rand_one_per_hwsku_frontend_hostname): +def stop_and_start_lldpmgrd(duthosts, enum_rand_one_per_hwsku_frontend_hostname): """Stops `lldpmgrd` process at setup stage and restarts it at teardwon. Args: @@ -31,13 +31,34 @@ def disable_lldp(duthosts, enum_rand_one_per_hwsku_frontend_hostname): None. """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - duthost.command("docker exec lldp supervisorctl stop lldpmgrd") + + logger.info("Stopping 'lldpmgrd' process in 'lldp' container ...") + stop_command_result = duthost.command("docker exec lldp supervisorctl stop lldpmgrd") + exit_code = stop_command_result["rc"] + pytest_assert(exit_code == 0, "Failed to stop 'lldpmgrd' process in 'lldp' container!") + logger.info("'lldpmgrd' process in 'lldp' container is stopped.") + if duthost.is_multi_asic: - duthost.command("docker exec lldp0 supervisorctl stop lldpmgrd") + logger.info("Stopping 'lldpmgrd' process in 'lldp0' container ...") + stop_command_result = duthost.command("docker exec lldp0 supervisorctl stop lldpmgrd") + exit_code = stop_command_result["rc"] + pytest_assert(exit_code == 0, "Failed to stop 'lldpmgrd' process in 'lldp0' container!") + logger.info("'lldpmgrd' process in 'lldp0' container is stopped.") + yield - duthost.command("docker exec lldp supervisorctl start lldpmgrd") + + logger.info("Starting 'lldpmgrd' process in 'lldp' container ...") + start_command_result = duthost.command("docker exec lldp supervisorctl start lldpmgrd") + exit_code = start_command_result["rc"] + pytest_assert(exit_code == 0, "Failed to start 'lldpmgrd' process in 'lldp' 
container!") + logger.info("'lldpmgrd' process in 'lldp' container is started.") + if duthost.is_multi_asic: - duthost.command("docker exec lldp0 supervisorctl start lldpmgrd") + logger.info("Starting 'lldpmgrd' process in 'lldp0' container ...") + start_command_result = duthost.command("docker exec lldp0 supervisorctl start lldpmgrd") + exit_code = start_command_result["rc"] + pytest_assert(exit_code == 0, "Failed to start 'lldpmgrd' process in 'lldp0' container!") + logger.info("'lldpmgrd' process in 'lldp0' container is started.") def check_monit_last_output(duthost): @@ -62,7 +83,7 @@ def check_monit_last_output(duthost): else: return "/usr/bin/lldpmgrd' is not running in host" in monit_last_output else: - pytest.fail("Failed to get Monit last output of process 'lldpmgrd'!") + return False def test_monit_status(duthosts, enum_rand_one_per_hwsku_frontend_hostname): @@ -76,16 +97,21 @@ def test_monit_status(duthosts, enum_rand_one_per_hwsku_frontend_hostname): Returns: None. """ + logger.info("Checking the running status of Monit ...") + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + def _monit_status(): monit_status_result = duthost.shell("sudo monit status", module_ignore_errors=True) return monit_status_result["rc"] == 0 - # Monit is configured with start delay = 300s, hence we wait up to 320s here - pytest_assert(wait_until(320, 20, _monit_status), - "Monit is either not running or not configured correctly") + # Monit is configured with start delay = 300s, hence we wait up to 320s here + pytest_assert(wait_until(320, 20, _monit_status), + "Monit is either not running or not configured correctly") + + logger.info("Checking the running status of Monit was done!") -def test_monit_reporting_message(duthosts, enum_rand_one_per_hwsku_frontend_hostname, disable_lldp): +def test_monit_reporting_message(duthosts, enum_rand_one_per_hwsku_frontend_hostname, stop_and_start_lldpmgrd): """Checks whether the format of alerting message from Monit is correct 
or not. 202012 and newer image version will be skipped for testing since Supervisord replaced Monit to do the monitoring critical processes. @@ -105,5 +131,9 @@ def test_monit_reporting_message(duthosts, enum_rand_one_per_hwsku_frontend_host pytest_require("201811" in duthost.os_version or "201911" in duthost.os_version, "Test is not supported for 202012 and newer image versions!") - if not wait_until(180, 60, check_monit_last_output, duthost): - pytest.fail("Expected Monit reporting message not found") + logger.info("Checking the format of Monit alerting message ...") + + pytest_assert(wait_until(180, 60, check_monit_last_output, duthost), + "Expected Monit reporting message not found") + + logger.info("Checking the format of Monit alerting message was done!") From 20b75b7ec706dcfd1bd8a8cb8a33283c7ae87f1a Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Mon, 26 Jul 2021 21:12:58 -0700 Subject: [PATCH 015/117] [advance-reboot] Add health check before and after fast/warm reboot (#3803) Add health checks before and after reboot testcases. Pre test check is needed to verify that the test starts in a healthy state, and to catch any issues that were caused by previous test. Post test checks are needed as advanced-reboot script takes care of traffic (data/control plane) verification only. 
This new verification adds: check_services, check_interfaces_and_transceivers, check_neighbors, verify_no_coredumps --- tests/platform_tests/test_advanced_reboot.py | 44 ++++++++++---------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py index 2aab6c8c71d..4da98e65e21 100644 --- a/tests/platform_tests/test_advanced_reboot.py +++ b/tests/platform_tests/test_advanced_reboot.py @@ -13,8 +13,8 @@ ### Tetcases to verify normal reboot procedure ### -@pytest.mark.usefixtures('get_advanced_reboot') -def test_fast_reboot(request, get_advanced_reboot, advanceboot_loganalyzer): +def test_fast_reboot(request, get_advanced_reboot, verify_dut_health, + advanceboot_loganalyzer): ''' Fast reboot test case is run using advacned reboot test fixture @@ -25,9 +25,9 @@ def test_fast_reboot(request, get_advanced_reboot, advanceboot_loganalyzer): advancedReboot.runRebootTestcase() -@pytest.mark.usefixtures('get_advanced_reboot') @pytest.mark.device_type('vs') -def test_warm_reboot(request, get_advanced_reboot, advanceboot_loganalyzer): +def test_warm_reboot(request, get_advanced_reboot, verify_dut_health, + advanceboot_loganalyzer): ''' Warm reboot test case is run using advacned reboot test fixture @@ -39,7 +39,8 @@ def test_warm_reboot(request, get_advanced_reboot, advanceboot_loganalyzer): ### Testcases to verify abruptly failed reboot procedure ### -def test_cancelled_fast_reboot(request, add_fail_step_to_reboot, verify_dut_health, get_advanced_reboot): +def test_cancelled_fast_reboot(request, add_fail_step_to_reboot, verify_dut_health, + get_advanced_reboot): ''' Negative fast reboot test case to verify DUT is left in stable state when fast reboot procedure abruptly ends. 
@@ -52,7 +53,8 @@ def test_cancelled_fast_reboot(request, add_fail_step_to_reboot, verify_dut_heal @pytest.mark.device_type('vs') -def test_cancelled_warm_reboot(request, add_fail_step_to_reboot, verify_dut_health, get_advanced_reboot): +def test_cancelled_warm_reboot(request, add_fail_step_to_reboot, verify_dut_health, + get_advanced_reboot): ''' Negative warm reboot test case to verify DUT is left in stable state when warm reboot procedure abruptly ends. @@ -65,8 +67,8 @@ def test_cancelled_warm_reboot(request, add_fail_step_to_reboot, verify_dut_heal ### Tetcases to verify reboot procedure with SAD cases ### -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_sad(request, get_advanced_reboot, advanceboot_neighbor_restore): +def test_warm_reboot_sad(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db, advanceboot_neighbor_restore): ''' Warm reboot with sad path @@ -93,8 +95,8 @@ def test_warm_reboot_sad(request, get_advanced_reboot, advanceboot_neighbor_rest ) -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_multi_sad(request, get_advanced_reboot, advanceboot_neighbor_restore): +def test_warm_reboot_multi_sad(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db, advanceboot_neighbor_restore): ''' Warm reboot with multi sad path @@ -130,8 +132,8 @@ def test_warm_reboot_multi_sad(request, get_advanced_reboot, advanceboot_neighbo ) -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_multi_sad_inboot(request, get_advanced_reboot): +def test_warm_reboot_multi_sad_inboot(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db): ''' Warm reboot with multi sad path (during boot) @@ -152,8 +154,8 @@ def test_warm_reboot_multi_sad_inboot(request, get_advanced_reboot): ) -@pytest.mark.usefixtures('get_advanced_reboot', 
'backup_and_restore_config_db') -def test_warm_reboot_sad_bgp(request, get_advanced_reboot, advanceboot_neighbor_restore): +def test_warm_reboot_sad_bgp(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db, advanceboot_neighbor_restore): ''' Warm reboot with sad (bgp) @@ -175,8 +177,8 @@ def test_warm_reboot_sad_bgp(request, get_advanced_reboot, advanceboot_neighbor_ ) -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_sad_lag_member(request, get_advanced_reboot, advanceboot_neighbor_restore): +def test_warm_reboot_sad_lag_member(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db, advanceboot_neighbor_restore): ''' Warm reboot with sad path (lag member) @@ -207,8 +209,8 @@ def test_warm_reboot_sad_lag_member(request, get_advanced_reboot, advanceboot_ne ) -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_sad_lag(request, get_advanced_reboot, advanceboot_neighbor_restore): +def test_warm_reboot_sad_lag(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db, advanceboot_neighbor_restore): ''' Warm reboot with sad path (lag) @@ -230,8 +232,8 @@ def test_warm_reboot_sad_lag(request, get_advanced_reboot, advanceboot_neighbor_ ) -@pytest.mark.usefixtures('get_advanced_reboot', 'backup_and_restore_config_db') -def test_warm_reboot_sad_vlan_port(request, get_advanced_reboot): +def test_warm_reboot_sad_vlan_port(request, get_advanced_reboot, verify_dut_health, + backup_and_restore_config_db): ''' Warm reboot with sad path (vlan port) @@ -249,4 +251,4 @@ def test_warm_reboot_sad_vlan_port(request, get_advanced_reboot): advancedReboot.runRebootTestcase( prebootList=prebootList, prebootFiles='peer_dev_info,neigh_port_info' - ) \ No newline at end of file + ) From 2908d4d5cc5cdd869d9ba88185772edc68ba107b Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: 
Tue, 27 Jul 2021 14:54:34 +0800 Subject: [PATCH 016/117] [test_bgp_allow_list] Enable t1-backend topology (#3885) Approach What is the motivation for this PR? As the subject. How did you do it? Check spine_neighbors before using it because there are no spine neighbors on t1-backend topo. Signed-off-by: Longxiang Lyu lolv@microsoft.com --- tests/bgp/test_bgp_allow_list.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/bgp/test_bgp_allow_list.py b/tests/bgp/test_bgp_allow_list.py index 67d52022dc0..4ad45f92dd6 100644 --- a/tests/bgp/test_bgp_allow_list.py +++ b/tests/bgp/test_bgp_allow_list.py @@ -88,7 +88,9 @@ def setup(tbinfo, nbrhosts, duthosts, rand_one_dut_hostname): tor_neighbors = natsorted([neighbor for neighbor in nbrhosts.keys() if neighbor.endswith('T0')]) tor1 = tor_neighbors[0] spine_neighbors = natsorted([neighbor for neighbor in nbrhosts.keys() if neighbor.endswith('T2')]) - other_neighbors = tor_neighbors[1:3] + spine_neighbors[0:2] # Only check a few neighbors to save time + other_neighbors = tor_neighbors[1:3] # Only check a few neighbors to save time + if spine_neighbors: + other_neighbors += spine_neighbors[0:2] tor1_offset = tbinfo['topo']['properties']['topology']['VMs'][tor1]['vm_offset'] tor1_exabgp_port = EXABGP_BASE_PORT + tor1_offset From 1424492cea8abcef69b05f2c4c4c4d67e04119a9 Mon Sep 17 00:00:00 2001 From: Kevin Wang <65380078+kevinskwang@users.noreply.github.com> Date: Tue, 27 Jul 2021 05:20:44 -0700 Subject: [PATCH 017/117] [test_http_copy] skip the http test (#3893) What is the motivation for this PR? The case doesn't clean up the files in /tmp of ptf, it will casue the subsequent case failed. How did you do it? just skip this test before issue is addressed. 
Signed-off-by: Kevin Wang --- tests/http/test_http_copy.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/http/test_http_copy.py b/tests/http/test_http_copy.py index e5519775352..8772267672f 100644 --- a/tests/http/test_http_copy.py +++ b/tests/http/test_http_copy.py @@ -15,7 +15,9 @@ def test_http_copy(duthosts, rand_one_dut_hostname, ptfhost): """Test that HTTP (copy) can be used to download objects to the DUT""" - + + pytest.skip("---- test doesn't clean up the files in /tmp, it will cause the subsequent cases in /tmp fail," + "skipping until the issue is addressed ----") duthost = duthosts[rand_one_dut_hostname] ptf_ip = ptfhost.mgmt_ip From 3ccd26a2ca35f2e3a9847743cf820d99a0204388 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Tue, 27 Jul 2021 05:35:52 -0700 Subject: [PATCH 018/117] Change test running dir for test_lag_2 (#3894) Signed-off-by: bingwang --- tests/pc/test_lag_2.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py index b34cf106de5..0940d7a5063 100644 --- a/tests/pc/test_lag_2.py +++ b/tests/pc/test_lag_2.py @@ -19,19 +19,25 @@ pytest.mark.usefixtures('disable_route_checker_module') ] +# The dir will be deleted from host, so be sure not to use system dir +TEST_DIR = "/tmp/acstests/" + @pytest.fixture(scope="module") def common_setup_teardown(ptfhost): logger.info("########### Setup for lag testing ###########") + ptfhost.shell("mkdir -p {}".format(TEST_DIR)) # Copy PTF test into PTF-docker for test LACP DU test_files = ['lag_test.py', 'acs_base_test.py', 'router_utils.py'] for test_file in test_files: src = "../ansible/roles/test/files/acstests/%s" % test_file - dst = "/tmp/%s" % test_file + dst = TEST_DIR + test_file ptfhost.copy(src=src, dest=dst) yield ptfhost + ptfhost.file(path=TEST_DIR, state="absent") + class LagTest: def __init__(self, duthost, tbinfo, ptfhost, nbrhosts, fanouthosts, 
conn_graph_facts): self.duthost = duthost @@ -80,7 +86,7 @@ def __verify_lag_lacp_timing(self, lacp_timer, exp_iface): 'ether_type': 0x8809, 'interval_count': 3 } - ptf_runner(self.ptfhost, '/tmp', "lag_test.LacpTimingTest", '/root/ptftests', params=params) + ptf_runner(self.ptfhost, TEST_DIR, "lag_test.LacpTimingTest", '/root/ptftests', params=params) def __verify_lag_minlink( self, From 1c2360a5bc83d4953fa8cc5c843894bd7e0b02f2 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Tue, 27 Jul 2021 07:10:51 -0700 Subject: [PATCH 019/117] [snmp facts] abstract snmp facts retrieving and add wait support (#3891) What is the motivation for this PR? SNMP test are failing since the flex counter delay change was added to latest master/202012 branch. If the DUT freshly went through a config reload, snmp facts retrieving would fail due to counters are not created yet. How did you do it? abstract snmp facts retrieving in a helper function and changed all existing callers to use the helper. add wait support to the helper and changed tests/snmp/test* to wait for the facts. How did you verify/test it? Issue "sudo config reload -y" on dut and immediately start snmp test bypasing sanity check. Without the change, the first few tests would fail in ansible module snmp_facts.py. With the change, tests passed. 
--- tests/cacl/test_cacl_function.py | 12 ++++---- tests/common/helpers/snmp_helpers.py | 41 +++++++++++++++++++++++++++ tests/mvrf/test_mgmtvrf.py | 3 +- tests/snmp/test_snmp_cpu.py | 6 ++-- tests/snmp/test_snmp_default_route.py | 4 ++- tests/snmp/test_snmp_interfaces.py | 7 +++-- tests/snmp/test_snmp_lldp.py | 3 +- tests/snmp/test_snmp_loopback.py | 3 +- tests/snmp/test_snmp_memory.py | 9 +++--- tests/snmp/test_snmp_pfc_counters.py | 3 +- tests/snmp/test_snmp_phy_entity.py | 3 +- tests/snmp/test_snmp_psu.py | 7 +++-- tests/snmp/test_snmp_queue.py | 3 +- tests/snmp/test_snmp_v2mib.py | 5 ++-- 14 files changed, 83 insertions(+), 26 deletions(-) create mode 100644 tests/common/helpers/snmp_helpers.py diff --git a/tests/cacl/test_cacl_function.py b/tests/cacl/test_cacl_function.py index 3dcf08d9f39..826248596de 100644 --- a/tests/cacl/test_cacl_function.py +++ b/tests/cacl/test_cacl_function.py @@ -1,5 +1,7 @@ import pytest +from tests.common.helpers.snmp_helpers import get_snmp_facts + pytestmark = [ pytest.mark.disable_loganalyzer, # disable automatic loganalyzer globally pytest.mark.topology('any'), @@ -17,7 +19,7 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): dut_mgmt_ip = duthost.mgmt_ip # Ensure we can gather basic SNMP facts from the device - res = localhost.snmp_facts(host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity']) + res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity']) if 'ansible_facts' not in res: pytest.fail("Failed to retrieve SNMP facts from DuT!") @@ -58,8 +60,8 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): pytest.fail("SSH did not timeout when expected. 
{}".format(res.get('msg', ''))) # Ensure we CANNOT gather basic SNMP facts from the device - res = localhost.snmp_facts(host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], - module_ignore_errors=True) + res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], + module_ignore_errors=True) if 'ansible_facts' in res or "No SNMP response received before timeout" not in res.get('msg', ''): pytest.fail("SNMP did not time out when expected") @@ -84,8 +86,8 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): duthost.file(path="/tmp/config_service_acls.sh", state="absent") # Ensure we can gather basic SNMP facts from the device once again - res = localhost.snmp_facts(host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], - module_ignore_errors=True) + res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], + module_ignore_errors=True) if 'ansible_facts' not in res: pytest.fail("Failed to retrieve SNMP facts from DuT!") diff --git a/tests/common/helpers/snmp_helpers.py b/tests/common/helpers/snmp_helpers.py new file mode 100644 index 00000000000..6e404ba840a --- /dev/null +++ b/tests/common/helpers/snmp_helpers.py @@ -0,0 +1,41 @@ +from tests.common.utilities import wait_until +from tests.common.errors import RunAnsibleModuleFail +from tests.common.helpers.assertions import pytest_assert + +logger = logging.getLogger(__name__) + +DEF_WAIT_TIMEOUT=300 +DEF_CHECK_INTERVAL=10 + +global_snmp_facts={} + +def _get_snmp_facts(localhost, host, version, community, is_dell, module_ignore_errors): + snmp_facts = localhost.snmp_facts(host=host, version=version, community=community, is_dell=is_dell, module_ignore_errors=module_ignore_errors) + return snmp_facts + + +def _update_snmp_facts(localhost, host, version, community, is_dell): + global global_snmp_facts + + try: + global_snmp_facts = _get_snmp_facts(localhost, host, version, 
community, is_dell, + module_ignore_errors=False) + except RunAnsibleModuleFail as e: + logger.info("encountered error when getting snmp facts: {}".format(e)) + global_snmp_facts = {} + return False + + return True + + +def get_snmp_facts(localhost, host, version, community, is_dell=False, module_ignore_errors=False, + wait=False, timeouot=DEF_WAIT_TIMEOUT, interval=DEF_CHECK_INTERVAL): + if not wait: + return _get_snmp_facts(localhost, host, version, community, is_dell, module_ignore_errors) + + global global_snmp_facts + + pytest_assert(wait_until(timeouot, interval, _update_snmp_facts, localhost, host, version, + community, is_dell), "Timeout waiting for SNMP facts") + return global_snmp_facts + diff --git a/tests/mvrf/test_mgmtvrf.py b/tests/mvrf/test_mgmtvrf.py index caf0891210f..f71e272e7a7 100644 --- a/tests/mvrf/test_mgmtvrf.py +++ b/tests/mvrf/test_mgmtvrf.py @@ -7,6 +7,7 @@ from tests.common.utilities import wait_until from tests.common.config_reload import config_reload from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.snmp_helpers import get_snmp_facts from pkg_resources import parse_version pytestmark = [ @@ -105,7 +106,7 @@ def test_ping(self, duthost): duthost.ping() def test_snmp_fact(self, localhost, duthost, creds): - localhost.snmp_facts(host=duthost.mgmt_ip, version="v2c", community=creds['snmp_rocommunity']) + get_snmp_facts(localhost, host=duthost.mgmt_ip, version="v2c", community=creds['snmp_rocommunity']) class TestMvrfOutbound(): diff --git a/tests/snmp/test_snmp_cpu.py b/tests/snmp/test_snmp_cpu.py index b04498426b3..0fd40536b7c 100644 --- a/tests/snmp/test_snmp_cpu.py +++ b/tests/snmp/test_snmp_cpu.py @@ -2,6 +2,8 @@ import time import logging +from tests.common.helpers.snmp_helpers import get_snmp_facts + logger = logging.getLogger(__name__) pytestmark = [ @@ -34,7 +36,7 @@ def test_snmp_cpu(duthosts, enum_rand_one_per_hwsku_hostname, localhost, creds_a logger.info("found {} cpu on the 
dut".format(host_vcpus)) # Gather facts with SNMP version 2 - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], is_dell=True)['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], is_dell=True, wait=True)['ansible_facts'] assert int(snmp_facts['ansible_ChStackUnitCpuUtil5sec']) @@ -46,7 +48,7 @@ def test_snmp_cpu(duthosts, enum_rand_one_per_hwsku_hostname, localhost, creds_a time.sleep(20) # Gather facts with SNMP version 2 - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], is_dell=True)['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], is_dell=True, wait=True)['ansible_facts'] # Pull CPU utilization via shell # Explanation: Run top command with 2 iterations, 5sec delay. diff --git a/tests/snmp/test_snmp_default_route.py b/tests/snmp/test_snmp_default_route.py index efedcffea00..c75808e6b00 100644 --- a/tests/snmp/test_snmp_default_route.py +++ b/tests/snmp/test_snmp_default_route.py @@ -1,5 +1,7 @@ import pytest +from tests.common.helpers.snmp_helpers import get_snmp_facts + pytestmark = [ pytest.mark.topology('any'), pytest.mark.device_type('vs') @@ -12,7 +14,7 @@ def test_snmp_default_route(duthosts, enum_rand_one_per_hwsku_frontend_hostname, duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] dut_result = duthost.shell('show ip route 0.0.0.0/0 | grep "\*"') 
dut_result_nexthops = [] diff --git a/tests/snmp/test_snmp_interfaces.py b/tests/snmp/test_snmp_interfaces.py index 1aa8cec3672..7623ebf141b 100644 --- a/tests/snmp/test_snmp_interfaces.py +++ b/tests/snmp/test_snmp_interfaces.py @@ -1,5 +1,6 @@ import pytest from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any'), @@ -142,7 +143,7 @@ def test_snmp_interfaces(localhost, creds_all_duts, duthosts, enum_rand_one_per_ namespace = duthost.get_namespace_from_asic_id(enum_asic_index) config_facts = duthost.config_facts(host=duthost.hostname, source="persistent", namespace=namespace)['ansible_facts'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] snmp_ifnames = [ v['name'] for k, v in snmp_facts['snmp_interfaces'].items() ] print snmp_ifnames @@ -162,7 +163,7 @@ def test_snmp_mgmt_interface(localhost, creds_all_duts, duthosts, enum_rand_one_ duthost = duthosts[enum_rand_one_per_hwsku_hostname] hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] snmp_ifnames = [ v['name'] for k, v in snmp_facts['snmp_interfaces'].items() ] @@ -189,7 +190,7 @@ def test_snmp_interfaces_mibs(duthosts, enum_rand_one_per_hwsku_hostname, localh duthost = duthosts[enum_rand_one_per_hwsku_hostname] namespace = 
duthost.get_namespace_from_asic_id(enum_asic_index) hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] config_facts = duthost.config_facts(host=duthost.hostname, source="persistent", namespace=namespace)['ansible_facts'] ports_list = [] diff --git a/tests/snmp/test_snmp_lldp.py b/tests/snmp/test_snmp_lldp.py index d2f0d38e319..031ef4f403a 100644 --- a/tests/snmp/test_snmp_lldp.py +++ b/tests/snmp/test_snmp_lldp.py @@ -1,5 +1,6 @@ import pytest import re +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any'), @@ -37,7 +38,7 @@ def test_snmp_lldp(duthosts, enum_rand_one_per_hwsku_hostname, localhost, creds_ pytest.skip("LLDP not supported on supervisor node") hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] mg_facts = {} for asic_id in duthost.get_asic_ids(): mg_facts_ns = duthost.asic_instance(asic_id).get_extended_minigraph_facts(tbinfo)['minigraph_neighbors'] diff --git a/tests/snmp/test_snmp_loopback.py b/tests/snmp/test_snmp_loopback.py index 1281f2d03d2..069e231850c 100644 --- a/tests/snmp/test_snmp_loopback.py +++ b/tests/snmp/test_snmp_loopback.py @@ -1,5 +1,6 @@ import pytest import ipaddress +from tests.common.helpers.snmp_helpers import get_snmp_facts try: # python3 from shlex import quote except ImportError: # python2 @@ -42,7 +43,7 @@ def 
test_snmp_loopback(duthosts, enum_rand_one_per_hwsku_frontend_hostname, nbrh """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] # Get first neighbor VM information nbr = nbrhosts[list(nbrhosts.keys())[0]] diff --git a/tests/snmp/test_snmp_memory.py b/tests/snmp/test_snmp_memory.py index 69700b52693..693b32f5573 100644 --- a/tests/snmp/test_snmp_memory.py +++ b/tests/snmp/test_snmp_memory.py @@ -6,6 +6,7 @@ import pytest from tests.common.helpers.assertions import pytest_assert # pylint: disable=import-error +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any') ] @@ -49,8 +50,8 @@ def test_snmp_memory(duthosts, enum_rand_one_per_hwsku_hostname, localhost, cred """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c", - community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=host_ip, version="v2c", + community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] facts = collect_memory(duthost) compare = (('ansible_sysTotalFreeMemery', 'MemFree'), ('ansible_sysTotalBuffMemory', 'Buffers'), ('ansible_sysCachedMemory', 'Cached')) @@ -75,8 +76,8 @@ def test_snmp_memory_load(duthosts, enum_rand_one_per_hwsku_hostname, localhost, # Start memory stress generation duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c", - community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=host_ip, version="v2c", + community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] mem_free = duthost.shell("grep MemFree /proc/meminfo | awk '{print $2}'")['stdout'] pytest_assert(CALC_DIFF(snmp_facts['ansible_sysTotalFreeMemery'], mem_free) < percent, "sysTotalFreeMemery differs by more than {}".format(percent)) diff --git a/tests/snmp/test_snmp_pfc_counters.py b/tests/snmp/test_snmp_pfc_counters.py index e518e42b355..c8e9d8a44df 100644 --- a/tests/snmp/test_snmp_pfc_counters.py +++ b/tests/snmp/test_snmp_pfc_counters.py @@ -1,4 +1,5 @@ import pytest +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any'), @@ -10,7 +11,7 @@ def test_snmp_pfc_counters(duthosts, enum_rand_one_per_hwsku_frontend_hostname, hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] # Check PFC counters # Ignore management ports, assuming the names starting with 'eth', eg. 
eth0 diff --git a/tests/snmp/test_snmp_phy_entity.py b/tests/snmp/test_snmp_phy_entity.py index 84f077c77f8..cf3da5797cd 100644 --- a/tests/snmp/test_snmp_phy_entity.py +++ b/tests/snmp/test_snmp_phy_entity.py @@ -6,6 +6,7 @@ from enum import Enum, unique from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_require +from tests.common.helpers.snmp_helpers import get_snmp_facts from tests.platform_tests.thermal_control_test_helper import mocker_factory pytestmark = [ @@ -202,7 +203,7 @@ def get_entity_and_sensor_mib(duthost, localhost, creds_all_duts): """ mib_info = {} hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] entity_mib = {} sensor_mib = {} for oid, info in snmp_facts['snmp_physical_entities'].items(): diff --git a/tests/snmp/test_snmp_psu.py b/tests/snmp/test_snmp_psu.py index d3409d0aa4e..4cbb7d58503 100644 --- a/tests/snmp/test_snmp_psu.py +++ b/tests/snmp/test_snmp_psu.py @@ -1,5 +1,6 @@ import pytest from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.snmp_helpers import get_snmp_facts PSU_STATUS_OK = 2 PSU_STATUS_FUNCTIONING_FAIL = 7 @@ -15,7 +16,7 @@ def test_snmp_numpsu(duthosts, enum_supervisor_dut_hostname, localhost, creds_al hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] res = duthost.shell("psuutil 
numpsus") assert int(res[u'rc']) == 0, "Failed to get number of PSUs" @@ -27,7 +28,7 @@ def test_snmp_numpsu(duthosts, enum_supervisor_dut_hostname, localhost, creds_al def test_snmp_psu_status(duthosts, enum_supervisor_dut_hostname, localhost, creds_all_duts): duthost = duthosts[enum_supervisor_dut_hostname] hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] psus_on = 0 msg = "Unexpected operstatus results {} != {} for PSU {}" @@ -49,4 +50,4 @@ def test_snmp_psu_status(duthosts, enum_supervisor_dut_hostname, localhost, cred pytest_assert(int(operstatus['operstatus']) == PSU_STATUS_MODULE_MISSING, msg.format(operstatus['operstatus'], PSU_STATUS_MODULE_MISSING, psu_indx)) - pytest_assert(psus_on >= 1, "At least one PSU should be with operstatus OK") \ No newline at end of file + pytest_assert(psus_on >= 1, "At least one PSU should be with operstatus OK") diff --git a/tests/snmp/test_snmp_queue.py b/tests/snmp/test_snmp_queue.py index 0265927649e..9c3e879d097 100644 --- a/tests/snmp/test_snmp_queue.py +++ b/tests/snmp/test_snmp_queue.py @@ -1,4 +1,5 @@ import pytest +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any'), @@ -11,7 +12,7 @@ def test_snmp_queues(duthosts, enum_rand_one_per_hwsku_hostname, localhost, cred pytest.skip("interfaces not present on supervisor node") hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", 
community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] for k, v in snmp_facts['snmp_interfaces'].items(): if "Ethernet" in v['description']: diff --git a/tests/snmp/test_snmp_v2mib.py b/tests/snmp/test_snmp_v2mib.py index a07de5ae15a..8504806cd9b 100644 --- a/tests/snmp/test_snmp_v2mib.py +++ b/tests/snmp/test_snmp_v2mib.py @@ -4,6 +4,7 @@ import pytest from tests.common.helpers.assertions import pytest_assert # pylint: disable=import-error +from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ pytest.mark.topology('any') @@ -16,8 +17,8 @@ def test_snmp_v2mib(duthosts, enum_rand_one_per_hwsku_hostname, localhost, creds """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c", - community=creds_all_duts[duthost]["snmp_rocommunity"])['ansible_facts'] + snmp_facts = get_snmp_facts(localhost, host=host_ip, version="v2c", + community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] dut_facts = duthost.setup()['ansible_facts'] debian_ver = duthost.shell('cat /etc/debian_version')['stdout'] cmd = 'docker exec snmp grep "sysContact" /etc/snmp/snmpd.conf' From 6702741a1f9ef724e6fd505c70afc4f6e67d59e2 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Wed, 28 Jul 2021 13:40:03 +0800 Subject: [PATCH 020/117] [test_bgp_update_timer] Enable storage backend topologies (#3827) Approach What is the motivation for this PR? Enable test_bgp_update_timer on storage backend topologies. Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you do it? for setup_interfaces, enable it to parse sub interfaces on t1-backend or parse tagged vlan ports on t0-backend. modify bgp_update_packets to filter out only IPv4 packets only. How did you verify/test it? Run test_bgp_update_timer on t0-backend and t1-backend. 
--- tests/bgp/conftest.py | 20 +++++++++++++++++++- tests/bgp/test_bgp_update_timer.py | 2 +- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py index 6a21e35b799..08da1fec3bc 100644 --- a/tests/bgp/conftest.py +++ b/tests/bgp/conftest.py @@ -24,6 +24,7 @@ from bgp_helpers import DUMP_FILE, CUSTOM_DUMP_SCRIPT, CUSTOM_DUMP_SCRIPT_DEST, BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip +from tests.common import constants logger = logging.getLogger(__name__) @@ -274,10 +275,13 @@ def _setup_interfaces_dualtor(mg_facts, peer_count): def _setup_interfaces_t0(mg_facts, peer_count): try: connections = [] + is_backend_topo = "backend" in tbinfo["topo"]["name"] vlan_intf = _find_vlan_intferface(mg_facts) vlan_intf_name = vlan_intf["attachto"] vlan_intf_addr = "%s/%s" % (vlan_intf["addr"], vlan_intf["prefixlen"]) vlan_members = mg_facts["minigraph_vlans"][vlan_intf_name]["members"] + is_vlan_tagged = mg_facts["minigraph_vlans"][vlan_intf_name].get("type", "").lower() == "tagged" + vlan_id = mg_facts["minigraph_vlans"][vlan_intf_name]["vlanid"] local_interfaces = random.sample(vlan_members, peer_count) neighbor_addresses = generate_ips( peer_count, @@ -299,6 +303,8 @@ def _setup_interfaces_t0(mg_facts, peer_count): conn["local_addr"] = vlan_intf_addr conn["neighbor_addr"] = neighbor_addr conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][local_intf] + if is_backend_topo and is_vlan_tagged: + conn["neighbor_intf"] += (constants.VLAN_SUB_INTERFACE_SEPARATOR + vlan_id) conn["loopback_ip"] = loopback_ip connections.append(conn) @@ -318,6 +324,7 @@ def _setup_interfaces_t0(mg_facts, peer_count): def _setup_interfaces_t1(mg_facts, peer_count): try: connections = [] + is_backend_topo = "backend" in tbinfo["topo"]["name"] ipv4_interfaces = [] used_subnets 
= set() if mg_facts["minigraph_interfaces"]: @@ -337,6 +344,13 @@ def _setup_interfaces_t1(mg_facts, peer_count): ipv4_lag_interfaces.append(pt["attachto"]) used_subnets.add(ipaddress.ip_network(pt["subnet"])) + vlan_sub_interfaces = [] + if is_backend_topo: + for intf in mg_facts.get("minigraph_vlan_sub_interfaces"): + if _is_ipv4_address(intf["addr"]): + vlan_sub_interfaces.append(intf["attachto"]) + used_subnets.add(ipaddress.ip_network(intf["subnet"])) + subnet_prefixlen = list(used_subnets)[0].prefixlen _subnets = ipaddress.ip_network(u"10.0.0.0/24").subnets(new_prefix=subnet_prefixlen) subnets = (_ for _ in _subnets if _ not in used_subnets) @@ -349,7 +363,7 @@ def _setup_interfaces_t1(mg_facts, peer_count): if not loopback_ip: pytest.fail("ipv4 lo interface not found") - for intf, subnet in zip(random.sample(ipv4_interfaces + ipv4_lag_interfaces, peer_count), subnets): + for intf, subnet in zip(random.sample(ipv4_interfaces + ipv4_lag_interfaces + vlan_sub_interfaces, peer_count), subnets): conn = {} local_addr, neighbor_addr = [_ for _ in subnet][:2] conn["local_intf"] = "%s" % intf @@ -361,6 +375,10 @@ def _setup_interfaces_t1(mg_facts, peer_count): member_intf = mg_facts["minigraph_portchannels"][intf]["members"][0] conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][member_intf] conn["namespace"] = mg_facts["minigraph_portchannels"][intf]["namespace"] + elif constants.VLAN_SUB_INTERFACE_SEPARATOR in intf: + orig_intf, vlan_id = intf.split(constants.VLAN_SUB_INTERFACE_SEPARATOR) + ptf_port_index = str(mg_facts["minigraph_port_indices"][orig_intf]) + conn["neighbor_intf"] = "eth" + ptf_port_index + constants.VLAN_SUB_INTERFACE_SEPARATOR + vlan_id else: conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][intf] connections.append(conn) diff --git a/tests/bgp/test_bgp_update_timer.py b/tests/bgp/test_bgp_update_timer.py index 719b001205a..10a7dbe90e4 100644 --- a/tests/bgp/test_bgp_update_timer.py +++ 
b/tests/bgp/test_bgp_update_timer.py @@ -137,7 +137,7 @@ def bgp_update_packets(pcap_file): """Get bgp update packets from pcap file.""" packets = sniff( offline=pcap_file, - lfilter=lambda p: bgp.BGPHeader in p and p[bgp.BGPHeader].type == 2 + lfilter=lambda p: IP in p and bgp.BGPHeader in p and p[bgp.BGPHeader].type == 2 ) return packets From 13d46b7b2d3d9cf0531666ecda9dedc6708aeaa7 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 29 Jul 2021 00:25:24 +0800 Subject: [PATCH 021/117] RESTAPI - Add delay in test_data_path (#3782) Signed-off-by: bingwang --- tests/restapi/test_restapi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/restapi/test_restapi.py b/tests/restapi/test_restapi.py index 5fdcb982ed3..a8aeb92fa79 100644 --- a/tests/restapi/test_restapi.py +++ b/tests/restapi/test_restapi.py @@ -106,6 +106,8 @@ def test_data_path(construct_url, vlan_members): pytest_assert(r.status_code == 204) # Verify routes + # Add some delay before query + time.sleep(5) params = '{}' r = restapi.get_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-2', params) pytest_assert(r.status_code == 200) From 60af1acc4be6bc399e1d170b70dc14394c88c3ec Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Wed, 28 Jul 2021 14:57:52 -0700 Subject: [PATCH 022/117] [multi-asic][chassis] fix the port index in port_index_map for masic (#3897) What is the motivation for this PR? The change done in PR #3643 to use the get_port_map, does not work for multi-ASIC platforms. The existing logic in config_facts gives the same index for ports on different ASICs. The PR is a fix for this problem. get_port_map will use the port index present in the config_db. How did you do it? Change in config_facts to get the port index from config_db if it's available. If the index is not present use the existing logic to generate the index How did you verify/test it?
run the test platform_tests/rest_reload_config.py to verify Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- ansible/library/config_facts.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py index 13c2f0fee42..48256022a27 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -71,8 +71,16 @@ def create_maps(config): port_name_list = config["PORT"].keys() port_name_list_sorted = natsorted(port_name_list) - for idx, val in enumerate(port_name_list_sorted): - port_index_map[val] = idx + #get the port_index from config_db if available + port_index_map = { + name: int(v['index']) + for name, v in config['PORT'].iteritems() + if 'index' in v + } + if not port_index_map: + #if not available generate an index + for idx, val in enumerate(port_name_list_sorted): + port_index_map[val] = idx port_name_to_alias_map = { name : v['alias'] if 'alias' in v else '' for name, v in config["PORT"].iteritems()} @@ -136,7 +144,7 @@ def main(): cfg_file_path = PERSISTENT_CONFIG_PATH.format("") with open(cfg_file_path, "r") as f: config = json.load(f) - elif m_args["source"] == "running": + elif m_args["source"] == "running": config = get_running_config(module, namespace) results = get_facts(config) module.exit_json(ansible_facts=results) From 01baaeefcb73b8ea78c66c902b3859741df71825 Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Wed, 28 Jul 2021 18:16:37 -0700 Subject: [PATCH 023/117] [advance-reboot] Fix reboot timing collection error (#3907) Kexec timing calculation from syslog messages is overwritten by bgp log messages. This happens due to same variable is first set and then unset as part of same function call by syslog and bgp log parsing. After the fix, the kexec time is fetched properly, and ultimately offset from kexec time capture are also as expected. 
--- tests/platform_tests/conftest.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index ed9c2738a02..e622a9d03ba 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -126,6 +126,19 @@ def get_report_summary(analyze_result, reboot_type): } return result_summary +def get_kexec_time(duthost, messages, result): + reboot_pattern = re.compile(r'.* NOTICE admin: Rebooting with /sbin/kexec -e to.*...') + reboot_time = "N/A" + logging.info("FINDING REBOOT PATTERN") + for message in messages: + # Get timestamp of reboot - Rebooting string + if re.search(reboot_pattern, message): + logging.info("FOUND REBOOT PATTERN for {}", duthost.hostname) + reboot_time = datetime.strptime(message.split(duthost.hostname)[0].strip(), FMT).strftime(FMT) + continue + result["reboot_time"] = { + "timestamp": {"Start": reboot_time}, + } def analyze_log_file(duthost, messages, result, offset_from_kexec): service_restart_times = dict() @@ -133,8 +146,6 @@ def analyze_log_file(duthost, messages, result, offset_from_kexec): logging.error("Expected messages not found in syslog") return None - reboot_pattern = re.compile(r'.* NOTICE admin: Rebooting with /sbin/kexec -e to.*...') - def service_time_check(message, status): time = datetime.strptime(message.split(duthost.hostname)[0].strip(), FMT) time = time.strftime(FMT) @@ -149,10 +160,6 @@ def service_time_check(message, status): reboot_time = "N/A" for message in messages: - # Get timestamp of reboot - Rebooting string - if re.search(reboot_pattern, message): - reboot_time = datetime.strptime(message.split(duthost.hostname)[0].strip(), FMT).strftime(FMT) - continue # Get stopping to started timestamps for services (swss, bgp, etc) for status, pattern in SERVICE_PATTERNS.items(): if re.search(pattern, message): @@ -195,9 +202,6 @@ def service_time_check(message, status): 
result["time_span"].update(service_restart_times) result["offset_from_kexec"] = offset_from_kexec - result["reboot_time"] = { - "timestamp": {"Start": reboot_time}, - } return result @@ -283,7 +287,10 @@ def advanceboot_loganalyzer(duthosts, rand_one_dut_hostname, request): offset_from_kexec = dict() for key, messages in result["expect_messages"].items(): - if "syslog" in key or "bgpd.log" in key: + if "syslog" in key: + get_kexec_time(duthost, messages, analyze_result) + analyze_log_file(duthost, messages, analyze_result, offset_from_kexec) + elif "bgpd.log" in key: analyze_log_file(duthost, messages, analyze_result, offset_from_kexec) elif "sairedis.rec" in key: analyze_sairedis_rec(messages, analyze_result, offset_from_kexec) From d6521600da35845557c4d12c579c311c57979993 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Thu, 29 Jul 2021 16:26:49 +0800 Subject: [PATCH 024/117] [Dynamic Buffer Calc] Add test cases for dynamic buffer calculation on top of port auto-negotiation (#3647) - What is the motivation for this PR? Add test cases for dynamic buffer calculation on top of port auto-negotiation - How did you do it? Add test cases for port auto-negotiation. Add different configurations for port auto-negotiation and verify the behavior of dynamic buffer calculation Skip the test if speed_to_test is not supported by the switch There is a logic to verify the calculated headroom size in the test: to compare the calculated headroom against the predefined pg_profile_lookup.ini Some speeds that supported by SAI, like 1G, isn't defined in the file because they are very unlikely to be used However, we need to take that speed for testing in some testbeds. Skip this verification in that case. - How did you verify/test it? Run the regression test. 
--- tests/qos/test_buffer.py | 156 +++++++++++++++++++++++++++++++++++---- 1 file changed, 143 insertions(+), 13 deletions(-) diff --git a/tests/qos/test_buffer.py b/tests/qos/test_buffer.py index d16895202e1..2e7399b58bc 100644 --- a/tests/qos/test_buffer.py +++ b/tests/qos/test_buffer.py @@ -4,6 +4,7 @@ import time import re import json +from natsort import natsorted import pytest @@ -505,20 +506,23 @@ def check_buffer_profile_details(duthost, initial_profiles, profile_name, profil speed = m.group(1) cable_length = m.group(2) std_profiles_for_speed = DEFAULT_LOSSLESS_HEADROOM_DATA.get(speed) - std_profile = std_profiles_for_speed.get(cable_length) - if std_profile: - # This means it's a profile with std speed and cable length. We can check whether the headroom data is correct - pytest_assert(profile_appldb['xon'] == std_profile['xon'] and profile_appldb['xoff'] == std_profile['xoff'] - and (profile_appldb['size'] == std_profile['size'] or DEFAULT_SHARED_HEADROOM_POOL_ENABLED), - "Generated profile {} doesn't match the std profile {}".format(profile_appldb, std_profile)) + if std_profiles_for_speed: + std_profile = std_profiles_for_speed.get(cable_length) + if std_profile: + # This means it's a profile with std speed and cable length. 
We can check whether the headroom data is correct + pytest_assert(profile_appldb['xon'] == std_profile['xon'] and profile_appldb['xoff'] == std_profile['xoff'] + and (profile_appldb['size'] == std_profile['size'] or DEFAULT_SHARED_HEADROOM_POOL_ENABLED), + "Generated profile {} doesn't match the std profile {}".format(profile_appldb, std_profile)) + else: + for std_cable_len, std_profile in std_profiles_for_speed.items(): + if int(std_cable_len[:-1]) > int(cable_length[:-1]): + pytest_assert(int(std_profile['xoff']) >= int(profile_appldb['xoff']), + "XOFF of generated profile {} is greater than standard profile {} while its cable length is less".format(profile_appldb, std_profile)) + else: + pytest_assert(int(std_profile['xoff']) <= int(profile_appldb['xoff']), + "XOFF of generated profile {} is less than standard profile {} while its cable length is greater".format(profile_appldb, std_profile)) else: - for std_cable_len, std_profile in std_profiles_for_speed.items(): - if int(std_cable_len[:-1]) > int(cable_length[:-1]): - pytest_assert(int(std_profile['xoff']) >= int(profile_appldb['xoff']), - "XOFF of generated profile {} is greater than standard profile {} while its cable length is less".format(profile_appldb, std_profile)) - else: - pytest_assert(int(std_profile['xoff']) <= int(profile_appldb['xoff']), - "XOFF of generated profile {} is less than standard profile {} while its cable length is greater".format(profile_appldb, std_profile)) + logging.info("Skip headroom checking because headroom information is not provided for speed {}".format(speed)) profiles_in_asicdb = set(duthost.shell('redis-cli -n 1 keys "ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE*"')['stdout'].split('\n')) diff = profiles_in_asicdb - initial_profiles @@ -692,6 +696,9 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p cable_len_to_test: To what cable length will the port's be changed """ duthost = duthosts[rand_one_dut_hostname] + supported_speeds = 
duthost.shell('redis-cli -n 6 hget "PORT_TABLE|{}" supported_speeds'.format(port_to_test))['stdout'] + if supported_speeds and speed_to_test not in supported_speeds: + pytest.skip('Speed is not supported by the port, skip') original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout'] original_cable_len = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout'] profile = duthost.shell('redis-cli hget "BUFFER_PG_TABLE:{}:3-4" profile'.format(port_to_test))['stdout'][1:-1] @@ -1527,6 +1534,129 @@ def test_port_admin_down(duthosts, rand_one_dut_hostname, conn_graph_facts, port ensure_pool_size(duthost, 60, original_pool_size, original_shp_size, None) +def test_port_auto_neg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test): + """The test case for auto negotiation enabled ports + + For those ports, the speed which is taken into account for buffer calculating is no longer the configure speed but + - The maximum supported speed if advertised-speeds is not configured + - The maximum advertised speed otherwise + + Args: + port_to_test: Port to run the test + + The flow of the test case: + 1. Fetch the supported_speeds from STATE_DB. It's exposed by port auto negotiation feature when system starts. + Skip the test if it is not exposed. + 2. Preparing: + - Configure the speed to the minimum supported one and the cable length to 15m + - This is to enforce there is a new buffer profile created + 3. Enable the port auto negotiation and then configure the advertised speed list and then disable it + - The maximum supported speed should be taken into account for buffer calculation after port auto negotiation enabled + - The maximum advertised speed should be taken after it is configured + - The configured speed should be taken after the port auto negotiation is disabled + 4. 
Enable the port auto negotiation with the advertised speed list configured + - The maximum advertised speed should be taken after it is configured + 5. Add a new PG. + - The maximum advertised speed should be taken in this case + 6. Configure advertised speed as all + - The maximum supported speed should be taken into account for buffer calculation + + Note: + The buffer pool size is not verified in this test because: + - Only the logic to generate effective speed is updated in port auto-negotiation, + which will affect only the buffer priority-groups and profiles on the port, which is verified in the test. + - The buffer pool size depends on the buffer priority-groups and profiles but not directly on the effective speed. + As buffer pool size has been verified in other test cases and checking it will consume more time, we don't repeat it here. + """ + def _get_max_speed_from_list(speed_list_str): + speed_list = natsorted(speed_list_str.split(',')) + return speed_list[-1] + + duthost = duthosts[rand_one_dut_hostname] + supported_speeds = duthost.shell('redis-cli -n 6 hget "PORT_TABLE|{}" supported_speeds'.format(port_to_test))['stdout'] + if not supported_speeds: + pytest.skip('No supported_speeds found for port {}, skip the test'.format(port_to_test))['stdout'] + original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout'] + original_cable_length = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout'] + original_pool_size = duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool size')['stdout'] + if DEFAULT_OVER_SUBSCRIBE_RATIO: + original_shp_size = int(duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool xoff')['stdout']) + else: + original_shp_size = None + + max_supported_speed = _get_max_speed_from_list(supported_speeds) + supported_speeds_list = natsorted(supported_speeds.split(',')) + speed_before_test = supported_speeds_list[0] + 
cable_length_to_test = '15m' + advertised_speeds_to_test = ','.join(supported_speeds_list[:-1]) + max_advertised_speed = _get_max_speed_from_list(advertised_speeds_to_test) + + initial_asic_db_profiles = fetch_initial_asic_db(duthost) + expected_profile = make_expected_profile_name(speed_before_test, cable_length_to_test) + try: + # Preparing: configure the speed to one which is not the maximum speed and the cable length to 15m + # This is to enforce there is a new buffer profile created + duthost.shell('config interface speed {} {}'.format(port_to_test, speed_before_test)) + duthost.shell('config interface cable-length {} {}'.format(port_to_test, cable_length_to_test)) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + new_profile_id, pool_id = check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, None, None) + + # As comments at the beginning of the method, we don't check buffer pool size in this test case. + # The same for all the following steps. 
+ + # Enable port auto negotiation first and then configure the advertised speed list + logging.info('Enable port auto negotiation') + duthost.shell('config interface autoneg {} enabled'.format(port_to_test)) + # Check whether the maximum supported speed is used for creating lossless profile + expected_profile = make_expected_profile_name(max_supported_speed, cable_length_to_test) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, new_profile_id, pool_id) + + # Configure advertised speeds + logging.info('Update advertised speeds to {}'.format(advertised_speeds_to_test)) + duthost.shell('config interface advertised-speeds {} {}'.format(port_to_test, advertised_speeds_to_test)) + # Check whether the maximum advertised speed is used for creating lossless profile + expected_profile = make_expected_profile_name(max_advertised_speed, cable_length_to_test) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, new_profile_id, pool_id) + + # Disable port auto negotiation + logging.info('Disable port auto negotiation') + duthost.shell('config interface autoneg {} disabled'.format(port_to_test)) + expected_profile = make_expected_profile_name(speed_before_test, cable_length_to_test) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, new_profile_id, pool_id) + + # Enable port auto negotiation with advertised speed configured + logging.info('Reenable port auto negotiation with advertised speeds configured') + duthost.shell('config interface autoneg {} enabled'.format(port_to_test)) + # Check whether the maximum advertised speed is used for creating lossless profile + expected_profile = 
make_expected_profile_name(max_advertised_speed, cable_length_to_test) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, new_profile_id, pool_id) + + # Add new PGs. The maximum advertised speed should be used + logging.info('Add new PG 6') + duthost.shell('config interface buffer priority-group lossless add {} 6'.format(port_to_test)) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:6'.format(port_to_test), expected_profile) + + # Update the advertised speed to all + logging.info('Update advertised speeds to all') + duthost.shell('config interface advertised-speeds {} all'.format(port_to_test)) + expected_profile = make_expected_profile_name(max_supported_speed, cable_length_to_test) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile) + check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:6'.format(port_to_test), expected_profile) + check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, new_profile_id, pool_id) + finally: + # Clean up + duthost.shell('config interface buffer priority-group lossless remove {} 6'.format(port_to_test), module_ignore_errors=True) + duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_length), module_ignore_errors=True) + duthost.shell('config interface speed {} {}'.format(port_to_test, original_speed), module_ignore_errors=True) + duthost.shell('config interface advertised-speeds {} all'.format(port_to_test), module_ignore_errors=True) + duthost.shell('config interface autoneg {} disabled'.format(port_to_test), module_ignore_errors=True) + ensure_pool_size(duthost, 60, original_pool_size, original_shp_size, None) + + @pytest.mark.disable_loganalyzer def test_exceeding_headroom(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test): """The test case for maximum headroom From 
ebb404064b6983c9620ddb549656b566e9d71703 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Thu, 29 Jul 2021 16:41:31 +0800 Subject: [PATCH 025/117] [test_bgp_speaker] Skip for dualtor testbeds (#3852) Approach What is the motivation for this PR? Skip test_bgp_speaker for dualtor testbeds. Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you do it? Define an autouse fixture skip_dualtor to check the testbed name. --- tests/bgp/test_bgp_speaker.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/bgp/test_bgp_speaker.py b/tests/bgp/test_bgp_speaker.py index 8b2e8fa03fb..14fd1f2b7ed 100644 --- a/tests/bgp/test_bgp_speaker.py +++ b/tests/bgp/test_bgp_speaker.py @@ -14,6 +14,8 @@ from tests.common.dualtor.mux_simulator_control import mux_server_url # lgtm[py/unused-import] from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports from tests.common.dualtor.dual_tor_utils import map_hostname_to_tor_side +from tests.common.helpers.assertions import pytest_require + pytestmark = [ pytest.mark.topology('t0'), @@ -55,6 +57,13 @@ def change_route(operation, ptfip, neighbor, route, nexthop, port): r = requests.post(url, data=data) assert r.status_code == 200 + +@pytest.fixture(scope="module", autouse=True) +def skip_dualtor(tbinfo): + """Skip running `test_bgp_speaker` over dualtor.""" + pytest_require("dualtor" not in tbinfo["topo"]["name"], "Skip 'test_bgp_speaker over dualtor.'") + + @pytest.fixture(scope="module") def common_setup_teardown(duthosts, rand_one_dut_hostname, ptfhost, localhost, tbinfo, toggle_all_simulator_ports): From 0de45d224be35dc411e59ffa86d2d3c48df0c13b Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Thu, 29 Jul 2021 13:36:28 -0700 Subject: [PATCH 026/117] [multi-asic] skip the neighbor check if the neighbor is ASIC(#3908) What is the motivation for this PR? 
Currently the test test_nbr_health is failing on multi asic platform. This is because the test incorrectly tries to find internal neighbors in nbrhost list for the DUT. How did you do it? exclude internal neighbors from the check in the test How did you verify/test it? run the tests in test_nbr_health.py on single and multi asic platforms Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- tests/test_nbr_health.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_nbr_health.py b/tests/test_nbr_health.py index ebc1ce5d0e2..45ded87941a 100644 --- a/tests/test_nbr_health.py +++ b/tests/test_nbr_health.py @@ -88,11 +88,12 @@ def test_neighbors_health(duthosts, localhost, nbrhosts, eos, sonic, enum_fronte dut_type = dev_meta["localhost"]["type"] for k, v in nei_meta.items(): - if v['type'] in ['SmartCable', 'Server'] or dut_type == v['type']: + if v['type'] in ['SmartCable', 'Server', 'Asic'] or dut_type == v['type']: # Smart cable doesn't respond to snmp, it doesn't have BGP session either. # DualToR has the peer ToR listed in device as well. If the device type # is the same as testing DUT, then it is the peer. # The server neighbors need to be skipped too. + # Skip if the neigbhor is asic as well. continue nbrhost = nbrhosts[k]['host'] From fb68bc5163d848d54d63f5e41b821f8790afbd6e Mon Sep 17 00:00:00 2001 From: Neetha John Date: Thu, 29 Jul 2021 18:08:14 -0700 Subject: [PATCH 027/117] [rdma] Skip nbrhosts fixture if no VM is present (#3913) Tgen topology used for RDMA testcase runs does not have any VMs defined which was causing those testcases to fail after this change #3656 Signed-off-by: Neetha John How did you do it? Skip nbrhosts fixture if no VMs are present in the topology How did you verify/test it?
Executed 'tgen' testcases with the fix and they passed --- tests/conftest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 7878527b85a..43e05d14f87 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -364,9 +364,13 @@ def nbrhosts(ansible_adhoc, tbinfo, creds, request): Shortcut fixture for getting VM host """ + devices = {} + if not tbinfo['vm_base'] and 'tgen' in tbinfo['topo']['name']: + logger.info("No VMs exist for this topology: {}".format(tbinfo['topo']['name'])) + return devices + vm_base = int(tbinfo['vm_base'][2:]) neighbor_type = request.config.getoption("--neighbor_type") - devices = {} for k, v in tbinfo['topo']['properties']['topology']['VMs'].items(): if neighbor_type == "eos": devices[k] = {'host': EosHost(ansible_adhoc, From 0cedade1a50dbe2a41ef8a490a7af19977e8c6bb Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Fri, 30 Jul 2021 09:14:05 +0800 Subject: [PATCH 028/117] [test_fib] Enable storage backend topologies (#3909) Approach What is the motivation for this PR? Restore changes of #3734 that are removed by #3702 . How did you do it? How did you verify/test it? 
--- tests/common/fixtures/fib_utils.py | 17 +++++++++++++---- tests/fib/test_fib.py | 1 + 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/common/fixtures/fib_utils.py b/tests/common/fixtures/fib_utils.py index ebaad821248..86091132f5f 100644 --- a/tests/common/fixtures/fib_utils.py +++ b/tests/common/fixtures/fib_utils.py @@ -115,6 +115,7 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): po = asic_cfg_facts.get('PORTCHANNEL', {}) ports = asic_cfg_facts.get('PORT', {}) + sub_interfaces = asic_cfg_facts.get('VLAN_SUB_INTERFACE', {}) with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp: fib = json.load(fp) @@ -135,7 +136,9 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): else: oports.append([str(duts_mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']]) else: - if ports.has_key(ifname): + if sub_interfaces.has_key(ifname): + oports.append([str(duts_mg_facts['minigraph_ptf_indices'][ifname.split('.')[0]])]) + elif ports.has_key(ifname): if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int': skip = True else: @@ -181,8 +184,8 @@ def gen_fib_info_file(ptfhost, fib_info, filename): ptfhost.copy(src=tmp_fib_info.name, dest=filename) -@pytest.fixture(scope='module') -def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_facts, tbinfo): +@pytest.fixture(scope='function') +def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_facts, tbinfo, request): """Get FIB info from database and store to text files on PTF host. For T2 topology, generate a single file to /root/fib_info_all_duts.txt to PTF host. @@ -200,11 +203,17 @@ def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_ list: List of FIB info file names on PTF host. 
""" duts_config_facts = duts_running_config_facts + testname = request.node.name files = [] if tbinfo['topo']['type'] != "t2": for dut_index, duthost in enumerate(duthosts): fib_info = get_fib_info(duthost, duts_config_facts[duthost.hostname], duts_minigraph_facts[duthost.hostname]) - filename = '/root/fib_info_dut{}.txt'.format(dut_index) + if 'test_basic_fib' in testname and 'backend' in tbinfo['topo']['name']: + # if it is a storage backend topology(bt0 or bt1) and testcase is test_basic_fib + # add a default route as failover in the prefix matching + fib_info[u'0.0.0.0/0'] = [] + fib_info[u'::/0'] = [] + filename = '/root/fib_info_dut_{0}_{1}.txt'.format(testname, dut_index) gen_fib_info_file(ptfhost, fib_info, filename) files.append(filename) else: diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index a3b8e502c0b..f281e6cbdd0 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,6 +10,7 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import ptf_test_port_map from tests.ptf_runner import ptf_runner from tests.common.helpers.assertions import pytest_assert From 57ffe6848514e4540e37a5d8270031abd1e9070d Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Thu, 29 Jul 2021 22:02:37 -0400 Subject: [PATCH 029/117] [vlan]: Dual ToR compatibility (#3892) - Use minigraph-ptf-indices to obtain mapping from DUT port to PTF port - For dual ToR platforms, pick a single ToR for testing and make all ports active on that ToR (necessary since the test - involves shutting down BGP sessions, so no IP-in-IP tunnel is available) - Clean up test setup and teardown Signed-off-by: 
Lawrence Lee --- tests/vlan/test_vlan.py | 194 +++++++++++++++++++++------------------- 1 file changed, 100 insertions(+), 94 deletions(-) diff --git a/tests/vlan/test_vlan.py b/tests/vlan/test_vlan.py index f2fdef58742..4073e41039a 100644 --- a/tests/vlan/test_vlan.py +++ b/tests/vlan/test_vlan.py @@ -5,7 +5,6 @@ from ptf.mask import Mask from collections import defaultdict -import time import json import itertools import logging @@ -14,6 +13,8 @@ from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # lgtm[py/unused-import] + from tests.common.config_reload import config_reload logger = logging.getLogger(__name__) @@ -37,18 +38,16 @@ def vlan_intfs_list(): @pytest.fixture(scope="module") -def vlan_ports_list(cfg_facts, ptfhost): +def vlan_ports_list(rand_selected_dut, tbinfo, cfg_facts): + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) vlan_ports_list = [] - config_ports = cfg_facts['PORT'] + config_ports = {k: v for k,v in cfg_facts['PORT'].items() if v.get('admin_status', 'down') == 'up'} config_portchannels = cfg_facts.get('PORTCHANNEL', {}) - config_port_indices = cfg_facts['port_index_map'] - ptf_ports_available_in_topo = ptfhost.host.options['variable_manager'].extra_vars.get("ifaces_map") - + config_port_indices = {k: v for k, v in mg_facts['minigraph_ptf_indices'].items() if k in config_ports} + ptf_ports_available_in_topo = {port_index: 'eth{}'.format(port_index) for port_index in config_port_indices.values()} config_port_channel_members = [port_channel[1]['members'] for port_channel in config_portchannels.items()] config_port_channel_member_ports = list(itertools.chain.from_iterable(config_port_channel_members)) - pvid_cycle = itertools.cycle(vlan_id_list) - # when running on t0 we can use the 
portchannel members if config_portchannels: for po in config_portchannels.keys()[:2]: @@ -82,7 +81,8 @@ def vlan_ports_list(cfg_facts, ptfhost): return vlan_ports_list -def create_vlan_interfaces(vlan_ports_list, vlan_intfs_list, duthost, ptfhost): +def create_vlan_interfaces(vlan_ports_list, ptfhost): + logger.info("Create PTF VLAN intfs") for vlan_port in vlan_ports_list: for permit_vlanid in vlan_port["permit_vlanid"].keys(): if int(permit_vlanid) != vlan_port["pvid"]: @@ -97,101 +97,94 @@ def create_vlan_interfaces(vlan_ports_list, vlan_intfs_list, duthost, ptfhost): pvid=permit_vlanid )) +def shutdown_portchannels(duthost, portchannel_interfaces): + cmds = [] + logger.info("Shutdown lags, flush IP addresses") + for portchannel, ips in portchannel_interfaces.items(): + cmds.append('config interface shutdown {}'.format(portchannel)) + for ip in ips: + cmds.append('config interface ip remove {} {}'.format(portchannel, ip)) + + duthost.shell_cmds(cmds=cmds) + +def create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list): + cmds = [] + logger.info("Add vlans, assign IPs") + for vlan in vlan_intfs_list: + cmds.append('config vlan add {}'.format(vlan['vlan_id'])) + cmds.append("config interface ip add Vlan{} {}".format(vlan['vlan_id'], vlan['ip'].upper())) + + # Delete untagged vlans from interfaces to avoid error message + # when adding untagged vlan to interface that already have one + if '201911' not in duthost.os_version: + logger.info("Delete untagged vlans from interfaces") + for vlan_port in vlan_ports_list: + vlan_members = cfg_facts.get('VLAN_MEMBER', {}) + vlan_name, vid = vlan_members.keys()[0], vlan_members.keys()[0].replace("Vlan", '') + try: + if vlan_members[vlan_name][vlan_port['dev']]['tagging_mode'] == 'untagged': + cmds.append("config vlan member del {} {}".format(vid, vlan_port['dev'])) + except KeyError: + continue + + logger.info("Add members to Vlans") + for vlan_port in vlan_ports_list: + for permit_vlanid in 
vlan_port['permit_vlanid'].keys(): + cmds.append('config vlan member add {tagged} {id} {port}'.format( + tagged=('--untagged' if vlan_port['pvid'] == permit_vlanid else ''), + id=permit_vlanid, + port=vlan_port['dev'] + )) + + duthost.shell_cmds(cmds=cmds) + +def startup_portchannels(duthost, portchannel_interfaces): + cmds =[] + logger.info("Bringup lags") + for portchannel in portchannel_interfaces: + cmds.append('config interface startup {}'.format(portchannel)) + + duthost.shell_cmds(cmds=cmds) + +def add_test_routes(duthost, vlan_ports_list): + cmds = [] + logger.info("Configure route for remote IP") + for item in vlan_ports_list: + for i in vlan_ports_list[0]['permit_vlanid']: + cmds.append('ip route add {} via {}'.format( + item['permit_vlanid'][i]['remote_ip'], + item['permit_vlanid'][i]['peer_ip'] + )) + + duthost.shell_cmds(cmds=cmds) @pytest.fixture(scope="module", autouse=True) -def setup_vlan(ptfadapter, duthosts, rand_one_dut_hostname, ptfhost, vlan_ports_list, vlan_intfs_list, cfg_facts): +def setup_vlan(duthosts, rand_one_dut_hostname, ptfhost, vlan_ports_list, vlan_intfs_list, cfg_facts): duthost = duthosts[rand_one_dut_hostname] # --------------------- Setup ----------------------- try: - # Generate vlan info portchannel_interfaces = cfg_facts.get('PORTCHANNEL_INTERFACE', {}) - logger.info("Shutdown lags, flush IP addresses") - for portchannel, ips in portchannel_interfaces.items(): - duthost.command('config interface shutdown {}'.format(portchannel)) - for ip in ips: - duthost.command('config interface ip remove {} {}'.format(portchannel, ip)) - - # Wait some time for route, neighbor, next hop groups to be removed, - # otherwise PortChannel RIFs are still referenced and won't be removed - time.sleep(90) - - logger.info("Add vlans, assign IPs") - for vlan in vlan_intfs_list: - duthost.command('config vlan add {}'.format(vlan['vlan_id'])) - duthost.command("config interface ip add Vlan{} {}".format(vlan['vlan_id'], vlan['ip'].upper())) - - # Delete 
untagged vlans from interfaces to avoid error message - # when adding untagged vlan to interface that already have one - if '201911' not in duthost.os_version: - logger.info("Delete untagged vlans from interfaces") - for vlan_port in vlan_ports_list: - vlan_members = cfg_facts.get('VLAN_MEMBER', {}) - vlan_name, vid = vlan_members.keys()[0], vlan_members.keys()[0].replace("Vlan", '') - try: - if vlan_members[vlan_name][vlan_port['dev']]['tagging_mode'] == 'untagged': - duthost.command("config vlan member del {} {}".format(vid, vlan_port['dev'])) - except KeyError: - continue - - logger.info("Add members to Vlans") - for vlan_port in vlan_ports_list: - for permit_vlanid in vlan_port['permit_vlanid'].keys(): - duthost.command('config vlan member add {tagged} {id} {port}'.format( - tagged=('--untagged' if vlan_port['pvid'] == permit_vlanid else ''), - id=permit_vlanid, - port=vlan_port['dev'] - )) - - # Make sure config applied - time.sleep(30) + shutdown_portchannels(duthost, portchannel_interfaces) - logger.info("Bringup lags") - for portchannel in portchannel_interfaces: - duthost.command('config interface startup {}'.format(portchannel)) + create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list) - # Make sure config applied - time.sleep(30) + startup_portchannels(duthost, portchannel_interfaces) - logger.info("Create VLAN intf") - create_vlan_interfaces(vlan_ports_list, vlan_intfs_list, duthost, ptfhost) + create_vlan_interfaces(vlan_ports_list, ptfhost) - logger.info("Configure route for remote IP") - for item in vlan_ports_list: - for i in vlan_ports_list[0]['permit_vlanid']: - duthost.command('ip route add {} via {}'.format( - item['permit_vlanid'][i]['remote_ip'], - item['permit_vlanid'][i]['peer_ip'] - )) - - logger.info("Copy arp_responder to ptfhost") + add_test_routes(duthost, vlan_ports_list) setUpArpResponder(vlan_ports_list, ptfhost) - extra_vars = { - 'arp_responder_args': '' - } - - 
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) - ptfhost.template(src='templates/arp_responder.conf.j2', dest='/tmp') - ptfhost.command("cp /tmp/arp_responder.conf.j2 /etc/supervisor/conf.d/arp_responder.conf") - - ptfhost.command('supervisorctl reread') - ptfhost.command('supervisorctl update') - - logger.info("Start arp_responder") - ptfhost.command('supervisorctl start arp_responder') - - time.sleep(10) - # --------------------- Testing ----------------------- yield # --------------------- Teardown ----------------------- finally: - tearDown(vlan_ports_list, duthost, ptfhost, vlan_intfs_list, portchannel_interfaces) + tearDown(vlan_ports_list, duthost, ptfhost) -def tearDown(vlan_ports_list, duthost, ptfhost, vlan_intfs_list, portchannel_interfaces): +def tearDown(vlan_ports_list, duthost, ptfhost): logger.info("VLAN test ending ...") logger.info("Stop arp_responder") @@ -210,24 +203,37 @@ def tearDown(vlan_ports_list, duthost, ptfhost, vlan_intfs_list, portchannel_int logger.error(e) config_reload(duthost) - # make sure Portchannels go up for post-test link sanity - time.sleep(90) - def setUpArpResponder(vlan_ports_list, ptfhost): + logger.info("Copy arp_responder to ptfhost") d = defaultdict(list) for vlan_port in vlan_ports_list: for permit_vlanid in vlan_port["permit_vlanid"].keys(): if int(permit_vlanid) == vlan_port["pvid"]: iface = "eth{}".format(vlan_port["port_index"][0]) else: - iface = "eth{}.{}".format(vlan_port["port_index"][0], permit_vlanid) + iface = "eth{}".format(vlan_port["port_index"][0]) + # iface = "eth{}.{}".format(vlan_port["port_index"][0], permit_vlanid) d[iface].append(vlan_port["permit_vlanid"][permit_vlanid]["peer_ip"]) with open('/tmp/from_t1.json', 'w') as file: json.dump(d, file) ptfhost.copy(src='/tmp/from_t1.json', dest='/tmp/from_t1.json') + extra_vars = { + 'arp_responder_args': '' + } + + ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) + 
ptfhost.template(src='templates/arp_responder.conf.j2', dest='/tmp') + ptfhost.command("cp /tmp/arp_responder.conf.j2 /etc/supervisor/conf.d/arp_responder.conf") + + ptfhost.command('supervisorctl reread') + ptfhost.command('supervisorctl update') + + logger.info("Start arp_responder") + ptfhost.command('supervisorctl start arp_responder') + def build_icmp_packet(vlan_id, src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff", src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64): @@ -314,7 +320,7 @@ def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id): @pytest.mark.bsl -def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list): +def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #1 Verify packets egress without tag from ports whose PVID same with ingress port @@ -332,7 +338,7 @@ def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list): @pytest.mark.bsl -def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list): +def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #2 Send tagged packets from each port. 
@@ -352,7 +358,7 @@ def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list): @pytest.mark.bsl -def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list): +def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #3 Send packets with invalid VLAN ID @@ -377,7 +383,7 @@ def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list): @pytest.mark.bsl -def test_vlan_tc4_tagged_non_broadcast(ptfadapter, vlan_ports_list): +def test_vlan_tc4_tagged_non_broadcast(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #4 Send packets w/ src and dst specified over tagged ports in vlan @@ -432,7 +438,7 @@ def test_vlan_tc4_tagged_non_broadcast(ptfadapter, vlan_ports_list): @pytest.mark.bsl -def test_vlan_tc5_untagged_non_broadcast(ptfadapter, vlan_ports_list, duthost): +def test_vlan_tc5_untagged_non_broadcast(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #5 Send packets w/ src and dst specified over untagged ports in vlan @@ -486,7 +492,7 @@ def test_vlan_tc5_untagged_non_broadcast(ptfadapter, vlan_ports_list, duthost): logger.info("Untagged packet successfully sent from port {} to port {}".format(dst_port[0], src_port)) -def test_vlan_tc6_tagged_qinq_switch_on_outer_tag(ptfadapter, vlan_ports_list, duthost): +def test_vlan_tc6_tagged_qinq_switch_on_outer_tag(ptfadapter, vlan_ports_list, duthost, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #6 Send qinq packets w/ src and dst specified over tagged ports in vlan From d2e336fe0735f17dfc0f2183475020e0e8dc25f8 Mon Sep 17 00:00:00 2001 From: Shi Su <67605788+shi-su@users.noreply.github.com> Date: Thu, 29 Jul 2021 23:31:49 -0700 Subject: [PATCH 030/117] Add wait for critical services in health check for advanced reboot (#3915) What is the motivation for this PR? 
There is a health check of the dut recently introduced to advanced reboot tests which checks the status of services including snmp. However, snmp starts three minutes after start-up and the advanced reboot tests do not have a dependency on snmp start. It follows that there is a race condition between when the test cases check the state of snmp and when snmp actually starts. If the test case checks snmp state earlier than it starts, the test would fail even though it would come up a few moments later. This PR aims to fix the false alarm. How did you do it? Add wait for critical services in the health check for advanced reboot tests. --- tests/platform_tests/verify_dut_health.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/platform_tests/verify_dut_health.py b/tests/platform_tests/verify_dut_health.py index 07882fcc7ec..30a69198ebb 100644 --- a/tests/platform_tests/verify_dut_health.py +++ b/tests/platform_tests/verify_dut_health.py @@ -33,6 +33,8 @@ def check_services(duthost): Perform a health check of services """ logging.info("Wait until all critical services are fully started") + # Wait until 300 seconds after boot up since a part of the services has delayed start (e.g., snmp) + wait_until_uptime(duthost, 300) logging.info("Check critical service status") if not duthost.critical_services_fully_started(): From 9ecd497a9b85c8a132a9899090b99316b685b450 Mon Sep 17 00:00:00 2001 From: Shi Su <67605788+shi-su@users.noreply.github.com> Date: Fri, 30 Jul 2021 09:31:24 -0700 Subject: [PATCH 031/117] Update VLAN prefixes (#3898) What is the motivation for this PR? The two-VLAN configurations use 192.168.100.1/21 and 192.168.200.1/21 as VLAN prefixes. Such prefixes conflict with the BGP routes (e.g., 192.168.200.0/25). It follows that part of the VLAN addresses may be overridden by BGP routes and become unreachable. This PR aims to fix the issue by avoiding overlap between VLAN prefixes and BGP routes. How did you do it?
Update VLAN prefixed to avoid conflict with BGP routes How did you verify/test it? Verify that VLAN addresses are not overlapped with BGP routes and are reachable. --- ansible/vars/topo_dualtor-120.yml | 4 ++-- ansible/vars/topo_dualtor-56.yml | 4 ++-- ansible/vars/topo_dualtor.yml | 4 ++-- ansible/vars/topo_mgmttor.yml | 4 ++-- ansible/vars/topo_t0-116.yml | 4 ++-- ansible/vars/topo_t0-120.yml | 4 ++-- ansible/vars/topo_t0-16.yml | 4 ++-- ansible/vars/topo_t0-35.yml | 4 ++-- ansible/vars/topo_t0-52.yml | 4 ++-- ansible/vars/topo_t0-56-po2vlan.yml | 4 ++-- ansible/vars/topo_t0-56.yml | 4 ++-- ansible/vars/topo_t0-64-32.yml | 4 ++-- ansible/vars/topo_t0-64.yml | 4 ++-- ansible/vars/topo_t0-80.yml | 4 ++-- ansible/vars/topo_t0-backend.yml | 4 ++-- ansible/vars/topo_t0.yml | 4 ++-- ansible/vars/topo_tgen-t0-3.yml | 4 ++-- ansible/vars/topo_tgen-t0-35-3.yml | 4 ++-- 18 files changed, 36 insertions(+), 36 deletions(-) diff --git a/ansible/vars/topo_dualtor-120.yml b/ansible/vars/topo_dualtor-120.yml index e3128efbe8c..fe498712046 100644 --- a/ansible/vars/topo_dualtor-120.yml +++ b/ansible/vars/topo_dualtor-120.yml @@ -242,13 +242,13 @@ topology: Vlan100: id: 100 intfs: [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 tunnel_configs: diff --git a/ansible/vars/topo_dualtor-56.yml b/ansible/vars/topo_dualtor-56.yml index a9591ba9702..a24c40faf22 100644 --- a/ansible/vars/topo_dualtor-56.yml +++ b/ansible/vars/topo_dualtor-56.yml @@ -146,13 +146,13 @@ topology: Vlan100: id: 100 intfs: [0, 2, 4, 6, 8, 10, 16, 18, 20, 22, 24, 26] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: 
fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [28, 30, 32, 34, 36, 38, 44, 46, 48, 50, 52, 54] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 tunnel_configs: diff --git a/ansible/vars/topo_dualtor.yml b/ansible/vars/topo_dualtor.yml index 051df1b9965..1800c74e698 100644 --- a/ansible/vars/topo_dualtor.yml +++ b/ansible/vars/topo_dualtor.yml @@ -98,13 +98,13 @@ topology: Vlan100: id: 100 intfs: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 tunnel_configs: diff --git a/ansible/vars/topo_mgmttor.yml b/ansible/vars/topo_mgmttor.yml index fb4e75baaf7..77a131b111d 100644 --- a/ansible/vars/topo_mgmttor.yml +++ b/ansible/vars/topo_mgmttor.yml @@ -128,13 +128,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-116.yml b/ansible/vars/topo_t0-116.yml index d56effb68da..0331aca0666 100644 --- a/ansible/vars/topo_t0-116.yml +++ b/ansible/vars/topo_t0-116.yml @@ -147,13 +147,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-120.yml b/ansible/vars/topo_t0-120.yml index c0f90f25d64..770faec9d77 100644 --- a/ansible/vars/topo_t0-120.yml +++ b/ansible/vars/topo_t0-120.yml @@ -173,13 +173,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 50, 51] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [52, 56, 57, 58, 62, 63, 64, 68, 69, 70, 72, 73, 74, 75, 76, 78, 80, 82, 84, 85, 86, 89, 90, 92, 93, 94, 97, 99, 100, 101, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-16.yml b/ansible/vars/topo_t0-16.yml index d242cadab65..91116fd5c55 100644 --- a/ansible/vars/topo_t0-16.yml +++ b/ansible/vars/topo_t0-16.yml @@ -120,13 +120,13 @@ topology: Vlan100: id: 100 intfs: [32, 33, 34, 35, 36, 37, 38, 39] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-35.yml b/ansible/vars/topo_t0-35.yml index 7500c856699..8bb5af3d394 100644 --- a/ansible/vars/topo_t0-35.yml +++ b/ansible/vars/topo_t0-35.yml @@ -70,13 +70,13 @@ topology: Vlan100: id: 100 intfs: [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - prefix: 
192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-52.yml b/ansible/vars/topo_t0-52.yml index 78092c92390..0c2b26ad574 100644 --- a/ansible/vars/topo_t0-52.yml +++ b/ansible/vars/topo_t0-52.yml @@ -79,13 +79,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-56-po2vlan.yml b/ansible/vars/topo_t0-56-po2vlan.yml index 87bcd069361..5d1c1d922c1 100644 --- a/ansible/vars/topo_t0-56-po2vlan.yml +++ b/ansible/vars/topo_t0-56-po2vlan.yml @@ -119,14 +119,14 @@ topology: id: 101 intfs: [] portchannels: ['PortChannel101'] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 101 Vlan102: id: 102 intfs: [8, 10, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 44, 46, 48, 50, 52, 54] portchannels: ['PortChannel101'] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 102 diff --git a/ansible/vars/topo_t0-56.yml b/ansible/vars/topo_t0-56.yml index e24d0a77794..9a1f59d9697 100644 --- a/ansible/vars/topo_t0-56.yml +++ b/ansible/vars/topo_t0-56.yml @@ -120,13 +120,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-64-32.yml b/ansible/vars/topo_t0-64-32.yml index 
6b88de1234e..62be887a8f8 100644 --- a/ansible/vars/topo_t0-64-32.yml +++ b/ansible/vars/topo_t0-64-32.yml @@ -59,13 +59,13 @@ topology: Vlan100: id: 100 intfs: [2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-64.yml b/ansible/vars/topo_t0-64.yml index 774e29bca41..af740635d5b 100644 --- a/ansible/vars/topo_t0-64.yml +++ b/ansible/vars/topo_t0-64.yml @@ -112,13 +112,13 @@ topology: Vlan100: id: 100 intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-80.yml b/ansible/vars/topo_t0-80.yml index 0340ecfce5d..3beb109c6cc 100644 --- a/ansible/vars/topo_t0-80.yml +++ b/ansible/vars/topo_t0-80.yml @@ -122,13 +122,13 @@ topology: Vlan100: id: 100 intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_t0-backend.yml b/ansible/vars/topo_t0-backend.yml index 799a75f7825..eadef8a4678 100644 --- a/ansible/vars/topo_t0-backend.yml +++ b/ansible/vars/topo_t0-backend.yml @@ -93,14 +93,14 @@ topology: Vlan100: id: 100 intfs: [4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - prefix: 
192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 type: tagged Vlan200: id: 200 intfs: [14, 15, 16, 17, 18, 19, 20, 21, 22] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 type: tagged diff --git a/ansible/vars/topo_t0.yml b/ansible/vars/topo_t0.yml index c19b682a610..b4f36eda50f 100644 --- a/ansible/vars/topo_t0.yml +++ b/ansible/vars/topo_t0.yml @@ -64,13 +64,13 @@ topology: Vlan100: id: 100 intfs: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_tgen-t0-3.yml b/ansible/vars/topo_tgen-t0-3.yml index 281d58bc8f4..b1acd96be8c 100644 --- a/ansible/vars/topo_tgen-t0-3.yml +++ b/ansible/vars/topo_tgen-t0-3.yml @@ -52,13 +52,13 @@ topology: Vlan100: id: 100 intfs: [1] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [2] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 diff --git a/ansible/vars/topo_tgen-t0-35-3.yml b/ansible/vars/topo_tgen-t0-35-3.yml index 30b85a52278..0208979fb84 100644 --- a/ansible/vars/topo_tgen-t0-35-3.yml +++ b/ansible/vars/topo_tgen-t0-35-3.yml @@ -55,13 +55,13 @@ topology: Vlan100: id: 100 intfs: [5] - prefix: 192.168.100.1/21 + prefix: 192.168.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: id: 200 intfs: [6] - prefix: 192.168.200.1/21 + prefix: 192.168.4.1/22 prefix_v6: fc02:200::1/64 tag: 200 From 28da508e6f82fe3b3835c8913f4cd14ef03f62b5 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Sat, 31 Jul 2021 00:19:30 -0700 Subject: [PATCH 032/117] Skip default route testcases on backend T1 (#3921) Backend T1 topology has no default route. Hence these testcases should be skipped. 
Signed-off-by: Neetha John --- tests/route/test_default_route.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/route/test_default_route.py b/tests/route/test_default_route.py index 4c8aef56945..c19cfe03b4d 100644 --- a/tests/route/test_default_route.py +++ b/tests/route/test_default_route.py @@ -1,7 +1,7 @@ import pytest import ipaddress import logging -from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.assertions import pytest_assert, pytest_require pytestmark = [ pytest.mark.topology('any'), @@ -10,11 +10,13 @@ logger = logging.getLogger(__name__) -def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index): +def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index, tbinfo): """ check if ipv4 and ipv6 default src address match Loopback0 address """ + pytest_require('t1-backend' not in tbinfo['topo']['name'], "Skip this testcase since this topology {} has no default routes".format(tbinfo['topo']['name'])) + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asichost = duthost.asic_instance(enum_asic_index) @@ -46,11 +48,13 @@ def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostna pytest_assert(rtinfo['set_src'] == lo_ipv6.ip, \ "default v6 route set src to wrong IP {} != {}".format(rtinfo['set_src'], lo_ipv6.ip)) -def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index): +def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index, tbinfo): """ check if ipv6 default route nexthop address uses global address """ + pytest_require('t1-backend' not in tbinfo['topo']['name'], "Skip this testcase since this topology {} has no default routes".format(tbinfo['topo']['name'])) + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asichost = 
duthost.asic_instance(enum_asic_index) From 4e1bba11d320c8c31ebf99bf91f2be753c29d581 Mon Sep 17 00:00:00 2001 From: Chitra Raghavan <32665166+chitra-raghavan@users.noreply.github.com> Date: Sat, 31 Jul 2021 12:52:55 +0530 Subject: [PATCH 033/117] [Platform API] : modified test_sfp for 400G ports (#3888) - For QSFP-DD, transceiver type is mapped as active or passive under specification compliance. Modified script for this change. - For Tx-bias, included check to verify whether the transceiver is optical. --- tests/platform_tests/api/test_sfp.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/platform_tests/api/test_sfp.py b/tests/platform_tests/api/test_sfp.py index 311144d125a..fbf9c5a4c47 100644 --- a/tests/platform_tests/api/test_sfp.py +++ b/tests/platform_tests/api/test_sfp.py @@ -164,10 +164,20 @@ def compare_value_with_platform_facts(self, key, value, sfp_idx, duthost): def is_xcvr_optical(self, xcvr_info_dict): """Returns True if transceiver is optical, False if copper (DAC)""" - spec_compliance_dict = ast.literal_eval(xcvr_info_dict["specification_compliance"]) - compliance_code = spec_compliance_dict.get("10/40G Ethernet Compliance Code") - if compliance_code == "40GBASE-CR4": - return False + #For QSFP-DD specification compliance will return type as passive or active + if xcvr_info_dict["type_abbrv_name"] == "QSFP-DD": + if xcvr_info_dict["specification_compliance"] == "passive_copper_media_interface": + return False + else: + spec_compliance_dict = ast.literal_eval(xcvr_info_dict["specification_compliance"]) + if xcvr_info_dict["type_abbrv_name"] == "SFP": + compliance_code = spec_compliance_dict.get("SFP+CableTechnology") + if compliance_code == "Passive Cable": + return False + else: + compliance_code = spec_compliance_dict.get("10/40G Ethernet Compliance Code") + if compliance_code == "40GBASE-CR4": + return False return True def is_xcvr_resettable(self, xcvr_info_dict): @@ -335,6 +345,11 @@ def
test_get_voltage(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost def test_get_tx_bias(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # TODO: Do more sanity checking on the data we retrieve for i in self.candidate_sfp: + info_dict = sfp.get_transceiver_info(platform_api_conn, i) + # Determine whether the transceiver type supports TX Bias + if not self.is_xcvr_optical(info_dict): + logger.warning("test_get_tx_bias: Skipping transceiver {} (not applicable for this transceiver type)".format(i)) + continue tx_bias = sfp.get_tx_bias(platform_api_conn, i) if self.expect(tx_bias is not None, "Unable to retrieve transceiver {} TX bias data".format(i)): self.expect(isinstance(tx_bias, list) and (all(isinstance(item, float) for item in tx_bias)), From 03e849b85408bb69379782a64a0181705d53c93e Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Mon, 2 Aug 2021 19:39:28 +0800 Subject: [PATCH 034/117] Revert "[test_fib] Enable storage backend topologies (#3909)" (#3927) This reverts commit 0cedade1a50dbe2a41ef8a490a7af19977e8c6bb. 
--- tests/common/fixtures/fib_utils.py | 17 ++++------------- tests/fib/test_fib.py | 1 - 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/tests/common/fixtures/fib_utils.py b/tests/common/fixtures/fib_utils.py index 86091132f5f..ebaad821248 100644 --- a/tests/common/fixtures/fib_utils.py +++ b/tests/common/fixtures/fib_utils.py @@ -115,7 +115,6 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): po = asic_cfg_facts.get('PORTCHANNEL', {}) ports = asic_cfg_facts.get('PORT', {}) - sub_interfaces = asic_cfg_facts.get('VLAN_SUB_INTERFACE', {}) with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp: fib = json.load(fp) @@ -136,9 +135,7 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): else: oports.append([str(duts_mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']]) else: - if sub_interfaces.has_key(ifname): - oports.append([str(duts_mg_facts['minigraph_ptf_indices'][ifname.split('.')[0]])]) - elif ports.has_key(ifname): + if ports.has_key(ifname): if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int': skip = True else: @@ -184,8 +181,8 @@ def gen_fib_info_file(ptfhost, fib_info, filename): ptfhost.copy(src=tmp_fib_info.name, dest=filename) -@pytest.fixture(scope='function') -def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_facts, tbinfo, request): +@pytest.fixture(scope='module') +def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_facts, tbinfo): """Get FIB info from database and store to text files on PTF host. For T2 topology, generate a single file to /root/fib_info_all_duts.txt to PTF host. @@ -203,17 +200,11 @@ def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_ list: List of FIB info file names on PTF host. 
""" duts_config_facts = duts_running_config_facts - testname = request.node.name files = [] if tbinfo['topo']['type'] != "t2": for dut_index, duthost in enumerate(duthosts): fib_info = get_fib_info(duthost, duts_config_facts[duthost.hostname], duts_minigraph_facts[duthost.hostname]) - if 'test_basic_fib' in testname and 'backend' in tbinfo['topo']['name']: - # if it is a storage backend topology(bt0 or bt1) and testcase is test_basic_fib - # add a default route as failover in the prefix matching - fib_info[u'0.0.0.0/0'] = [] - fib_info[u'::/0'] = [] - filename = '/root/fib_info_dut_{0}_{1}.txt'.format(testname, dut_index) + filename = '/root/fib_info_dut{}.txt'.format(dut_index) gen_fib_info_file(ptfhost, fib_info, filename) files.append(filename) else: diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index f281e6cbdd0..a3b8e502c0b 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,7 +10,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] -from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import ptf_test_port_map from tests.ptf_runner import ptf_runner from tests.common.helpers.assertions import pytest_assert From 306e8a057427fec69dd62aaae594441838477db1 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Mon, 2 Aug 2021 08:37:24 -0700 Subject: [PATCH 035/117] Skip snmp default route testcase on T1 backend (#3917) Fixes #3872 Since T1 backend topology has no default route, this testcase needs to be skipped Signed-off-by: Neetha John How did you verify/test it? 
Ran this testcase on t1-backend topo and verified that it is skipped --- tests/snmp/test_snmp_default_route.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/snmp/test_snmp_default_route.py b/tests/snmp/test_snmp_default_route.py index c75808e6b00..1bf1f8872bb 100644 --- a/tests/snmp/test_snmp_default_route.py +++ b/tests/snmp/test_snmp_default_route.py @@ -1,5 +1,6 @@ import pytest +from tests.common.helpers.assertions import pytest_require from tests.common.helpers.snmp_helpers import get_snmp_facts pytestmark = [ @@ -9,10 +10,12 @@ @pytest.mark.bsl -def test_snmp_default_route(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, creds_all_duts): +def test_snmp_default_route(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, creds_all_duts, tbinfo): """compare the snmp facts between observed states and target state""" duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + pytest_require('t1-backend' not in tbinfo['topo']['name'], "Skip this testcase since this topology {} has no default routes".format(tbinfo['topo']['name'])) + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] dut_result = duthost.shell('show ip route 0.0.0.0/0 | grep "\*"') From f549cfcfa5c5909dc431278e1e6c2727dd6063bd Mon Sep 17 00:00:00 2001 From: Neetha John Date: Mon, 2 Aug 2021 08:39:26 -0700 Subject: [PATCH 036/117] [test_default_route] Fix false pass on backend T1 (#3920) This testcase had a false pass on backend T1 topo which does not have a default route. The issue is because 'nexthop' key is always present in the rtinfo dict [in this case, it will have an empty list as its value]. 
Instead of checking for the key presence, we should check if there are values populated for the 'nexthop' Signed-off-by: Neetha John How did you verify/test it? Ran the test with the change on backend T1 and it failed as expected --- tests/route/test_default_route.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/route/test_default_route.py b/tests/route/test_default_route.py index c19cfe03b4d..9575636fcf2 100644 --- a/tests/route/test_default_route.py +++ b/tests/route/test_default_route.py @@ -59,7 +59,7 @@ def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_ asichost = duthost.asic_instance(enum_asic_index) rtinfo = asichost.get_ip_route_info(ipaddress.ip_network(u"::/0")) - pytest_assert(rtinfo['nexthops'] > 0, "cannot find ipv6 nexthop for default route") + pytest_assert(len(rtinfo['nexthops']) > 0, "cannot find ipv6 nexthop for default route") for nh in rtinfo['nexthops']: pytest_assert(not nh[0].is_link_local, \ "use link local address {} for nexthop".format(nh[0])) From d3510a7ac471e44038fcb31a2af6168fd47fb3a3 Mon Sep 17 00:00:00 2001 From: Vladyslav Morokhovych Date: Mon, 2 Aug 2021 20:26:50 +0300 Subject: [PATCH 037/117] [route] Fix of static route redistribution in test_static_route (#3785) Summary: Fix of static route redistribution in test_static_route Approach What is the motivation for this PR? PR-3633 add check if static routes get redistributed in test_static_route But BGP does not redistribute static route by default. How did you do it? Added function that enable/disable redistribution of static routes. How did you verify/test it? 
Run route/test_static_route.py route/test_static_route.py::test_static_route PASSED route/test_static_route.py::test_static_route_ecmp PASSED route/test_static_route.py::test_static_route_ipv6 PASSED route/test_static_route.py::test_static_route_ecmp_ipv6 PASSED Signed-off-by: Vladyslav Morokhovych --- tests/route/test_static_route.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/route/test_static_route.py b/tests/route/test_static_route.py index 6a52e12dcc8..67a95f168bd 100644 --- a/tests/route/test_static_route.py +++ b/tests/route/test_static_route.py @@ -140,6 +140,16 @@ def check_route_redistribution(duthost, prefix, ipv6, removed=False): assert prefix in adv_routes +def route_redistribution_static(duthost, ipv6, removed=False): + mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + if ipv6: + duthost.shell("vtysh -c 'configure terminal' -c 'router bgp {}' -c 'address-family ipv6' -c '{}redistribute static'".format(mg_facts["minigraph_bgp_asn"], + "no " if removed else '')) + else: + duthost.shell("vtysh -c 'configure terminal' -c 'router bgp {}' -c '{}redistribute static'".format(mg_facts["minigraph_bgp_asn"], + "no " if removed else '')) + + def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False, config_reload_test=False): # Clean up arp or ndp clear_arp_ndp(duthost, ipv6=ipv6) @@ -148,6 +158,8 @@ def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_ add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6) try: + # Enable redistribution of static routes + route_redistribution_static(duthost, ipv6) # Add static route duthost.shell("sonic-db-cli CONFIG_DB hmset 'STATIC_ROUTE|{}' nexthop {}".format(prefix, ",".join(nexthop_addrs))) time.sleep(5) @@ -163,6 +175,7 @@ def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_ if config_reload_test: 
duthost.shell('config save -y') config_reload(duthost, wait=350) + route_redistribution_static(duthost, ipv6) generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6) check_route_redistribution(duthost, prefix, ipv6) @@ -176,7 +189,8 @@ def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_ # Check the advertised route get removed time.sleep(5) check_route_redistribution(duthost, prefix, ipv6, removed=True) - + # Disable redistribution of static routes + route_redistribution_static(duthost, ipv6, removed=True) # Config save if the saved config_db was updated if config_reload_test: duthost.shell('config save -y') From b1a21ca41a3627fba602e36a47e73107ae29ee40 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Mon, 2 Aug 2021 21:19:04 -0400 Subject: [PATCH 038/117] [sanity]: Add log for mux sanity failure (#3929) Add a warning log when the check failures which contains the mux status returned by the mux simulator server. Signed-off-by: Lawrence Lee --- tests/common/plugins/sanity_check/checks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index 8340c1cdc20..8030b5fc0b4 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -504,6 +504,7 @@ def _check(*args, **kwargs): failed, reason = _check_single_intf_status(status, expected_side=side) if failed: + logger.warning('Mux sanity check failed for status:\n{}'.format(status)) results['failed'] = failed results['failed_reason'] = reason results['action'] = reset_simulator_port From 27942bf30abce53023b4d462c5b785889ddf3c84 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Tue, 3 Aug 2021 15:12:03 +0800 Subject: [PATCH 039/117] Fix config_fact (#3933) Signed-off-by: bingwang --- ansible/library/config_facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ansible/library/config_facts.py b/ansible/library/config_facts.py index 48256022a27..c80eae2e809 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -73,7 +73,7 @@ def create_maps(config): #get the port_index from config_db if available port_index_map = { - name: int(v['index']) + name: int(v['index']) - 1 for name, v in config['PORT'].iteritems() if 'index' in v } From a715ec90ddb4cddbf0779c62a63c8c28abd93a48 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Tue, 3 Aug 2021 07:12:10 -0700 Subject: [PATCH 040/117] [installer] issue command to be sh compatible (#3928) What is the motivation for this PR? ansible module issue commands with 'sh' as shell. How did you do it? update the syntax to be sh compatible Signed-off-by: Ying Xie ying.xie@microsoft.com How did you verify/test it? Trigger a test when the swap file is in use. --- ansible/library/reduce_and_add_sonic_images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/library/reduce_and_add_sonic_images.py b/ansible/library/reduce_and_add_sonic_images.py index e700fe9bfa9..ba0b19edc5a 100644 --- a/ansible/library/reduce_and_add_sonic_images.py +++ b/ansible/library/reduce_and_add_sonic_images.py @@ -69,7 +69,7 @@ def setup_swap_if_necessary(module): if total < 2048 or avail < 1200: # Memory size or available amount is low, there is risk of OOM during new # image installation. Create a temporary swap file. 
- exec_command(module, cmd="if [[ -f {0} ]]; then sudo swapoff {0}; sudo rm -f {0}; fi; sudo fallocate -l 1G {0}; sudo chmod 600 {0}; sudo mkswap {0}; sudo swapon {0}".format('/host/swapfile'), + exec_command(module, cmd="if [ -f {0} ]; then sudo swapoff {0}; sudo rm -f {0}; fi; sudo fallocate -l 1G {0}; sudo chmod 600 {0}; sudo mkswap {0}; sudo swapon {0}".format('/host/swapfile'), msg="Create a temporary swap file") From be9265663b9a9fb7dfee750330ca8103f4c7ad92 Mon Sep 17 00:00:00 2001 From: roysr-nv <69770290+roysr-nv@users.noreply.github.com> Date: Tue, 3 Aug 2021 20:42:34 +0300 Subject: [PATCH 041/117] Changing the reboot uptime checker to be more robust (#3935) Description of PR The checker is currently using the arbitrary value of 10 seconds gap between the 'now time before rebooting' and the uptime after rebooting. However,in case of soft boot- it is almost immediate (it executes /sbin/reboot) and with powerful CPUs we see it coming up in about 8-9 seconds which fails the check below. Hence this arbitrary wait was changed and now the test only verifies that the uptime is bigger than 'now time before rebooting'. Approach What is the motivation for this PR? Make the reboot downtime checker more robust How did you do it? Changed the reboot validation that the uptime is bigger than 'now time before rebooting'. How did you verify/test it? 
Ran the soft reboot test --- tests/common/reboot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/reboot.py b/tests/common/reboot.py index a73feb03a5f..cf2f4bdc944 100644 --- a/tests/common/reboot.py +++ b/tests/common/reboot.py @@ -195,7 +195,7 @@ def execute_reboot_helper(): pool.terminate() dut_uptime = duthost.get_up_time() logger.info('DUT {} up since {}'.format(hostname, dut_uptime)) - assert float(dut_uptime.strftime("%s")) - float(dut_datetime.strftime("%s")) > 10, "Device {} did not reboot".format(hostname) + assert float(dut_uptime.strftime("%s")) > float(dut_datetime.strftime("%s")), "Device {} did not reboot".format(hostname) def get_reboot_cause(dut): From f4a6b4672cf1b615e3cef8654e2d0a34df1e5780 Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Tue, 3 Aug 2021 15:34:20 -0700 Subject: [PATCH 042/117] [dualtor][orch] Skip orchagent testcases on real dualtor testbed (#3931) Based on the findings here: Azure/sonic-buildimage#8088, dualtor testbeds which are part of orchagent test plan are to be skipped. 
This PR adds skips below cases real dualtor testbed: ``` test_orchagent_active_tor_downstream.py::test_active_tor_remove_neighbor_downstream_active test_orchagent_active_tor_downstream.py::test_downstream_ecmp_nexthops test_orchagent_mac_move.py::test_mac_move test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_t1_link_recovered test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_bgp_recovered test_orchagent_standby_tor_downstream.py::test_standby_tor_remove_neighbor_downstream_standby test_standby_tor_upstream_mux_toggle.py::test_standby_tor_upstream_mux_toggle test_orch_stress.py::test_change_mux_state test_orch_stress.py::test_flap_neighbor_entry_active ``` test_orch_stress.py::test_flap_neighbor_entry_standby --- tests/common/dualtor/dual_tor_mock.py | 8 ++++++++ tests/dualtor/test_orch_stress.py | 3 +++ .../test_orchagent_active_tor_downstream.py | 7 +++---- tests/dualtor/test_orchagent_mac_move.py | 1 + .../test_orchagent_standby_tor_downstream.py | 15 +++++++++------ .../test_standby_tor_upstream_mux_toggle.py | 4 +++- 6 files changed, 27 insertions(+), 11 deletions(-) diff --git a/tests/common/dualtor/dual_tor_mock.py b/tests/common/dualtor/dual_tor_mock.py index 631c7e1995d..6081f5988dc 100644 --- a/tests/common/dualtor/dual_tor_mock.py +++ b/tests/common/dualtor/dual_tor_mock.py @@ -8,8 +8,10 @@ ip_address, IPv4Address from tests.common import config_reload from tests.common.dualtor.dual_tor_utils import tor_mux_intfs +from tests.common.helpers.assertions import pytest_require __all__ = [ + 'require_mocked_dualtor', 'apply_active_state_to_orchagent', 'apply_dual_tor_neigh_entries', 'apply_dual_tor_peer_switch_route', @@ -135,6 +137,12 @@ def is_mocked_dualtor(tbinfo): return 'dualtor' not in tbinfo['topo']['name'] +@pytest.fixture +def require_mocked_dualtor(tbinfo): + pytest_require(is_t0_mocked_dualtor(tbinfo), "This testcase is designed for " + 
"single tor testbed with mock dualtor config. Skip this testcase on real dualtor testbed") + + def set_mux_state(dut, tbinfo, state, itfs, toggle_all_simulator_ports): if is_mocked_dualtor(tbinfo): set_dual_tor_state_to_orchagent(dut, state, itfs) diff --git a/tests/dualtor/test_orch_stress.py b/tests/dualtor/test_orch_stress.py index 65112e82cf1..0e28264eb35 100644 --- a/tests/dualtor/test_orch_stress.py +++ b/tests/dualtor/test_orch_stress.py @@ -147,6 +147,7 @@ def config_crm_polling_interval(rand_selected_dut): def test_change_mux_state( + require_mocked_dualtor, apply_mock_dual_tor_tables, apply_mock_dual_tor_kernel_configs, rand_selected_dut, @@ -214,6 +215,7 @@ def add_neighbors(dut, neighbors, interface): def test_flap_neighbor_entry_active( + require_mocked_dualtor, apply_mock_dual_tor_tables, apply_mock_dual_tor_kernel_configs, rand_selected_dut, @@ -247,6 +249,7 @@ def test_flap_neighbor_entry_active( def test_flap_neighbor_entry_standby( + require_mocked_dualtor, apply_mock_dual_tor_tables, apply_mock_dual_tor_kernel_configs, rand_selected_dut, diff --git a/tests/dualtor/test_orchagent_active_tor_downstream.py b/tests/dualtor/test_orchagent_active_tor_downstream.py index 52eaf4e7d3d..76035a71592 100644 --- a/tests/dualtor/test_orchagent_active_tor_downstream.py +++ b/tests/dualtor/test_orchagent_active_tor_downstream.py @@ -33,7 +33,7 @@ def test_active_tor_remove_neighbor_downstream_active( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - set_crm_polling_interval, + require_mocked_dualtor, set_crm_polling_interval, tunnel_traffic_monitor, vmhost ): """ @@ -84,9 +84,8 @@ def stop_garp(ptfhost): def test_downstream_ecmp_nexthops( - ptfadapter, - rand_selected_dut, tbinfo, - toggle_all_simulator_ports, + ptfadapter, rand_selected_dut, tbinfo, + require_mocked_dualtor, toggle_all_simulator_ports, tor_mux_intfs ): dst_server_ipv4 = "1.1.1.2" diff --git a/tests/dualtor/test_orchagent_mac_move.py 
b/tests/dualtor/test_orchagent_mac_move.py index 9278c9f9c16..eb6b6104549 100644 --- a/tests/dualtor/test_orchagent_mac_move.py +++ b/tests/dualtor/test_orchagent_mac_move.py @@ -81,6 +81,7 @@ def enable_garp(duthost): def test_mac_move( + require_mocked_dualtor, announce_new_neighbor, apply_active_state_to_orchagent, conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, set_crm_polling_interval, diff --git a/tests/dualtor/test_orchagent_standby_tor_downstream.py b/tests/dualtor/test_orchagent_standby_tor_downstream.py index d821e4f5d47..1491dfc8492 100644 --- a/tests/dualtor/test_orchagent_standby_tor_downstream.py +++ b/tests/dualtor/test_orchagent_standby_tor_downstream.py @@ -99,7 +99,8 @@ def shutdown_one_bgp_session(rand_selected_dut): startup_bgp_session(rand_selected_dut, bgp_shutdown) -def test_standby_tor_downstream(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo): +def test_standby_tor_downstream(ptfhost, rand_selected_dut, rand_unselected_dut, + tbinfo, require_mocked_dualtor): """ Verify tunnel traffic to active ToR is distributed equally across nexthops, and no traffic is forwarded to server from standby ToR @@ -108,7 +109,8 @@ def test_standby_tor_downstream(ptfhost, rand_selected_dut, rand_unselected_dut, check_tunnel_balance(**params) -def test_standby_tor_downstream_t1_link_recovered(ptfhost, rand_selected_dut, rand_unselected_dut, verify_crm_nexthop_counter_not_increased, tbinfo): +def test_standby_tor_downstream_t1_link_recovered(ptfhost, rand_selected_dut, rand_unselected_dut, + require_mocked_dualtor, verify_crm_nexthop_counter_not_increased, tbinfo): """ Verify traffic is distributed evenly after t1 link is recovered; Verify CRM that no new nexthop created @@ -134,7 +136,8 @@ def test_standby_tor_downstream_t1_link_recovered(ptfhost, rand_selected_dut, ra check_tunnel_balance(**params) -def test_standby_tor_downstream_bgp_recovered(ptfhost, rand_selected_dut, rand_unselected_dut, verify_crm_nexthop_counter_not_increased, tbinfo): 
+def test_standby_tor_downstream_bgp_recovered(ptfhost, rand_selected_dut, rand_unselected_dut, + require_mocked_dualtor, verify_crm_nexthop_counter_not_increased, tbinfo): """ Verify traffic is shifted to the active links and no traffic drop observed; Verify traffic is distributed evenly after BGP session is recovered; @@ -177,7 +180,7 @@ def test_standby_tor_downstream_loopback_route_readded(ptfhost, rand_selected_du def test_standby_tor_remove_neighbor_downstream_standby( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - set_crm_polling_interval, + require_mocked_dualtor, set_crm_polling_interval, tunnel_traffic_monitor, vmhost ): """ @@ -224,8 +227,8 @@ def stop_garp(ptfhost): def test_downstream_standby_mux_toggle_active( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - tunnel_traffic_monitor, vmhost, toggle_all_simulator_ports, - tor_mux_intfs + require_mocked_dualtor, tunnel_traffic_monitor, + vmhost, toggle_all_simulator_ports, tor_mux_intfs ): # set rand_selected_dut as standby and rand_unselected_dut to active tor test_params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo) diff --git a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py index 70b2587e063..5a29f855122 100644 --- a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py +++ b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py @@ -47,7 +47,9 @@ def test_cleanup(rand_selected_dut): config_reload(rand_selected_dut) -def test_standby_tor_upstream_mux_toggle(rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, toggle_all_simulator_ports, set_crm_polling_interval): +def test_standby_tor_upstream_mux_toggle( + rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, + require_mocked_dualtor, toggle_all_simulator_ports, set_crm_polling_interval): itfs, ip = rand_selected_interface PKT_NUM = 100 # Step 1. 
Set mux state to standby and verify traffic is dropped by ACL rule and drop counters incremented From bf750efbba0f48eefff2d02b4948b1788134d6cc Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Tue, 3 Aug 2021 20:52:50 -0400 Subject: [PATCH 043/117] [sanity]: Recover DUT before VMs (#3938) Signed-off-by: Lawrence Lee --- tests/common/plugins/sanity_check/__init__.py | 4 ++-- tests/common/plugins/sanity_check/recover.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/common/plugins/sanity_check/__init__.py b/tests/common/plugins/sanity_check/__init__.py index 17db4f0e109..403632380f2 100644 --- a/tests/common/plugins/sanity_check/__init__.py +++ b/tests/common/plugins/sanity_check/__init__.py @@ -242,10 +242,10 @@ def sanity_check(localhost, duthosts, request, fanouthosts, nbrhosts, tbinfo): and callable(failed_result['action']): infra_recovery_actions.append(failed_result['action']) for dut_name, dut_results in dut_failed_results.items(): - # Attempt to restore neighbor VM state - neighbor_vm_restore(duthosts[dut_name], nbrhosts, tbinfo) # Attempt to restore DUT state recover(duthosts[dut_name], localhost, fanouthosts, dut_results, recover_method) + # Attempt to restore neighbor VM state + neighbor_vm_restore(duthosts[dut_name], nbrhosts, tbinfo) for action in infra_recovery_actions: action() diff --git a/tests/common/plugins/sanity_check/recover.py b/tests/common/plugins/sanity_check/recover.py index 9891b9eacab..2ee3c8973e8 100644 --- a/tests/common/plugins/sanity_check/recover.py +++ b/tests/common/plugins/sanity_check/recover.py @@ -106,6 +106,7 @@ def recover(dut, localhost, fanouthosts, check_results, recover_method): def neighbor_vm_restore(duthost, nbrhosts, tbinfo): + logger.info("Restoring neighbor VMs for {}".format(duthost)) mg_facts = duthost.get_extended_minigraph_facts(tbinfo) vm_neighbors = mg_facts['minigraph_neighbors'] lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] From 
ef53e946eaa2d47c762a9d6ff7193e39e9de0100 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Wed, 4 Aug 2021 11:45:43 +0800 Subject: [PATCH 044/117] Add script `restart_nightly_ptf` to restart nightly testbed ptf (#3902) Approach What is the motivation for this PR? Restart testbeds' ptf that have pipelines defined. How did you do it? Run all testbeds in each server per process. How did you verify/test it? Signed-off-by: Longxiang Lyu --- ansible/restart_nightly_ptf.py | 161 +++++++++++++++++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 ansible/restart_nightly_ptf.py diff --git a/ansible/restart_nightly_ptf.py b/ansible/restart_nightly_ptf.py new file mode 100644 index 00000000000..9844db4a6e1 --- /dev/null +++ b/ansible/restart_nightly_ptf.py @@ -0,0 +1,161 @@ +import argparse +import logging +import imp +import os +import recover_server +import sys +import collections +import datetime +import time +import tempfile + +# Add tests path to syspath +sys.path.append('../') + + +ANSIBLE_DIR = os.path.abspath(os.path.dirname(__file__)) +SONIC_MGMT_DIR = os.path.dirname(ANSIBLE_DIR) + + +class TaskRestartPTF(recover_server.Task): + """Task restart-ptf.""" + def __init__(self, tbname, passfile, log_save_dir, tbfile=None, vmfile=None, dry_run=False): + recover_server.Task.__init__(self, tbname + '_restart_ptf', log_save_dir=log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=dry_run) + self.args.extend(('restart-ptf', tbname, passfile)) + self.tbname = tbname + + +class JobRuntimeError(Exception): + pass + + +class Job(object): + """Runs multiple Tasks.""" + + def __init__(self, jobname, **kwargs): + self.jobname = jobname + self.failed_task = None + self.dry_run = kwargs.get('dry_run', False) + self.ignore_errors = True + passfile = kwargs['passfile'] + tbfile = kwargs.get('tbfile') + vmfile = kwargs.get('vmfile') + log_save_dir = kwargs.get('log_save_dir') + tbname = kwargs['tbname'] + self.tasks = [ + 
TaskRestartPTF(tbname, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run) + ] + + def __call__(self): + """ + Run the tasks in the job sequentially. + + If one task failed to finish with non-zero return code, all the tasks + after will be skipped. + """ + for task in self.tasks: + task() + if not self.dry_run and task.returncode != 0: + self.failed_task = task + break + if self.failed_task is not None and not self.ignore_errors: + raise JobRuntimeError + + +def parse_testbed(testbedfile, servers): + """Return a dictionary containing mapping from server name to nightly testbeds that need restart-ptf.""" + testbed = imp.load_source('testbed', os.path.join(SONIC_MGMT_DIR, 'tests/common/testbed.py')) + all_testbeds = testbed.TestbedInfo(testbedfile).testbed_topo + nightly_dir = os.path.join(SONIC_MGMT_DIR, ".azure-pipelines", "nightly") + nightly_testbeds = [] + for _, _, files in os.walk(nightly_dir): + nightly_testbeds.extend(_.split(".")[0] for _ in files if _.startswith("vms") and _.endswith("yml")) + nightly_testbeds = list(set(nightly_testbeds)) + nightly_testbeds.sort() + should_restart = collections.defaultdict(list) + for tbname in set(nightly_testbeds): + if tbname not in all_testbeds: + logging.error("Failed to find testbed %s from testbed file %s", tbname, testbedfile) + continue + server = all_testbeds[tbname]["server"] + if "ptf" in all_testbeds[tbname]["ptf_image_name"]: + should_restart[server].append(tbname) + if servers: + return {s: should_restart[s] for s in servers} + return dict(should_restart) + + + +def do_jobs(testbeds, passfile, tbfile=None, vmfile=None, dry_run=False): + + def _do_jobs(jobs): + for job in jobs: + try: + job() + except JobRuntimeError: + # if one job raises JobRunTimeRrror signaling its failure, + # we need to skip all jobs after. This enable us to skip all + # those tasks after the server cleanup if cleanup fails. 
+ break + + def _join_all(threads): + alive_threads = collections.deque(threads) + while True: + for _ in range(len(alive_threads)): + alive_thread = alive_threads.popleft() + alive_thread.join(timeout=0) + if alive_thread.is_alive(): + alive_threads.append(alive_thread) + if not alive_threads: + break + time.sleep(5) + + utilities = imp.load_source('utilities', os.path.join(SONIC_MGMT_DIR, 'tests/common/utilities.py')) + + curr_date = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S') + log_save_dir = os.path.join(tempfile.gettempdir(), 'recover_server_' + curr_date) + logging.info('LOG PATH: %s', log_save_dir) + threads = [] + for server, tbnames in testbeds.items(): + log_save_dir_per_server = os.path.join(log_save_dir, server) + os.makedirs(log_save_dir_per_server) + jobs = [ + Job( + "restart-ptf", + server=server, + tbname=tbname, + passfile=passfile, + tbfile=tbfile, + vmfile=vmfile, + log_save_dir=log_save_dir_per_server, + dry_run=dry_run + ) for tbname in tbnames + ] + thread = utilities.InterruptableThread(name=server, target=_do_jobs, args=(jobs,)) + thread.start() + threads.append(thread) + + _join_all(threads) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Recover testbed servers.') + parser.add_argument('--testbed-servers', default=[], action='append', type=str, required=True, help='testbed server to recover') + parser.add_argument('--testbed', default='testbed.yaml', help='testbed file(default: testbed.yaml)') + parser.add_argument('--vm-file', default='veos', help='vm inventory file(default: veos)') + parser.add_argument('--passfile', default='password.txt', help='Ansible vault password file(default: password.txt)') + parser.add_argument('--dry-run', action='store_true', help='Dry run') + parser.add_argument('--log-level', choices=['debug', 'info', 'warn', 'error', 'critical'], default='info', help='logging output level') + args = parser.parse_args() + + servers = args.testbed_servers + tbfile = args.testbed + 
vmfile = args.vm_file + passfile = args.passfile + dry_run = args.dry_run + log_level = args.log_level + + recover_server.handler.setLevel(getattr(logging, log_level.upper())) + + testbeds = parse_testbed(tbfile, servers) + do_jobs(testbeds, passfile, tbfile=tbfile, vmfile=vmfile, dry_run=dry_run) From 885c909b7467a2fa05fe282933fb1287ef36ddac Mon Sep 17 00:00:00 2001 From: AntonHryshchuk <76687950+AntonHryshchuk@users.noreply.github.com> Date: Wed, 4 Aug 2021 14:52:32 +0300 Subject: [PATCH 045/117] [t0-56] test_sflow adaptation to t0-56 topology (#3796) Summary: Adaptation of sflow test to other topologies. Currently the test using static number of tested PortChannels - 4. Added option to test dynamic number of LAGs. --- ansible/roles/test/files/ptftests/sflow_test.py | 4 ++-- tests/sflow/test_sflow.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/test/files/ptftests/sflow_test.py b/ansible/roles/test/files/ptftests/sflow_test.py index 20b66249ca7..ccdc92034b4 100644 --- a/ansible/roles/test/files/ptftests/sflow_test.py +++ b/ansible/roles/test/files/ptftests/sflow_test.py @@ -209,7 +209,7 @@ def analyze_flow_sample(self, data, collector): #--------------------------------------------------------------------------- def sendTraffic(self): - self.src_ip_list = ['192.158.8.1','192.168.16.1', '192.168.24.1','192.168.32.1'] + src_ip_addr_templ = '192.168.{}.1' ip_dst_addr = '192.168.0.4' src_mac = self.dataplane.get_mac(0, 0) pktlen=100 @@ -217,7 +217,7 @@ def sendTraffic(self): for j in range(0, 100, 1): index = 0 for intf in self.interfaces: - ip_src_addr = str(self.src_ip_list[index]) + ip_src_addr = src_ip_addr_templ.format(str(8 * index)) src_port = self.interfaces[intf]['ptf_indices'] dst_port = self.dst_port tcp_pkt = simple_tcp_packet(pktlen=pktlen, diff --git a/tests/sflow/test_sflow.py b/tests/sflow/test_sflow.py index 776e3486a81..602fe8a1413 100644 --- a/tests/sflow/test_sflow.py +++ b/tests/sflow/test_sflow.py @@ 
-356,9 +356,9 @@ def testIntfRemoval(self, sflowbase_config, duthost, partial_ptf_runner): verify_sflow_interfaces(duthost,sflow_int[0],'down',512) verify_sflow_interfaces(duthost,sflow_int[1],'down',512) - verify_sflow_interfaces(duthost,sflow_int[2],'up',512) - verify_sflow_interfaces(duthost,sflow_int[3],'up',512) - enabled_intf = sflow_int[2:4] + enabled_intf = sflow_int[2:] + for intf in enabled_intf: + verify_sflow_interfaces(duthost, intf, 'up', 512) partial_ptf_runner( enabled_sflow_interfaces=enabled_intf, active_collectors="['collector0','collector1']" ) From 6a2e2ff1e8f6a7e43be1d72e64bbd52b74bcb0cb Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Wed, 4 Aug 2021 17:08:19 +0300 Subject: [PATCH 046/117] [dhcp_relay] DHCPv6 automatic test (#3767) What is the motivation for this PR? Add DHCPv6 automatic test infrastructure and PTF test. How did you do it? * Enhance the minigraph_facts.py and minigraph_dpg.j2 to support DHCPv6 instances. * Add DHCPv6 servers to lab.yml file. * This will deploy on a DUT DHCPv6 servers on regression run. * Develop a test with 3 test cases: * test_dhcp_relay_default * test_dhcp_relay_after_link_flap * test_dhcp_relay_start_with_uplinks_down * Develop a PTF runner test to simulate the traffic according to the test case from sonic-mgmt. How did you verify/test it? Use testbed-cli.sh to generate and deploy a minigraph with DHCPv6 instances on a switch. Run the test. This test depends on PR: [dhcp_relay] DHCP relay support for IPv6 sonic-buildimage#7772 Supported testbed topology if it's a new test case? 
T0 Signed-off-by: Shlomi Bitton --- ansible/group_vars/lab/lab.yml | 3 + ansible/library/minigraph_facts.py | 15 +- .../test/files/ptftests/dhcpv6_relay_test.py | 306 ++++++++++++++++++ ansible/templates/minigraph_dpg.j2 | 4 + tests/dhcp_relay/test_dhcpv6_relay.py | 214 ++++++++++++ tests/kvmtest.sh | 1 + 6 files changed, 540 insertions(+), 3 deletions(-) create mode 100644 ansible/roles/test/files/ptftests/dhcpv6_relay_test.py create mode 100644 tests/dhcp_relay/test_dhcpv6_relay.py diff --git a/ansible/group_vars/lab/lab.yml b/ansible/group_vars/lab/lab.yml index 002893f4549..db55c15e02f 100644 --- a/ansible/group_vars/lab/lab.yml +++ b/ansible/group_vars/lab/lab.yml @@ -39,6 +39,9 @@ snmp_servers: ['10.0.0.9'] # dhcp relay servers dhcp_servers: ['192.0.0.1', '192.0.0.2', '192.0.0.3', '192.0.0.4'] +# dhcpv6 relay servers +dhcpv6_servers: ['fc02:2000::1', 'fc02:2000::2', 'fc02:2000::3', 'fc02:2000::4'] + # snmp variables snmp_rocommunity: public snmp_location: testlab diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py index d4c6b819bba..f7dee8bf3ca 100644 --- a/ansible/library/minigraph_facts.py +++ b/ansible/library/minigraph_facts.py @@ -379,6 +379,7 @@ def _parse_intf(intfname, ipprefix): vlanintfs = child.find(str(QName(ns, "VlanInterfaces"))) dhcp_servers = [] + dhcpv6_servers = [] vlans = {} for vintf in vlanintfs.findall(str(QName(ns, "VlanInterface"))): vintfname = vintf.find(str(QName(ns, "Name"))).text @@ -392,6 +393,12 @@ def _parse_intf(intfname, ipprefix): else: vlandhcpservers = "" dhcp_servers = vlandhcpservers.split(";") + vintf_node = vintf.find(str(QName(ns, "Dhcpv6Relays"))) + if vintf_node is not None and vintf_node.text is not None: + vlandhcpservers = vintf_node.text + else: + vlandhcpservers = "" + dhcpv6_servers = vlandhcpservers.split(";") for i, member in enumerate(vmbr_list): # Skip PortChannel inside Vlan if member in pcs: @@ -421,7 +428,7 @@ def _parse_intf(intfname, ipprefix): if acl_intfs: 
acls[aclname] = acl_intfs - return intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers + return intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers, dhcpv6_servers return None, None, None, None, None, None, None def parse_cpg(cpg, hname): @@ -601,6 +608,7 @@ def parse_xml(filename, hostname, asic_name=None): resource_type = None syslog_servers = [] dhcp_servers = [] + dhcpv6_servers = [] ntp_servers = [] mgmt_routes = [] bgp_peers_with_range = [] @@ -632,7 +640,7 @@ def parse_xml(filename, hostname, asic_name=None): for child in root: if asic_name is None: if child.tag == str(QName(ns, "DpgDec")): - (intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers) = parse_dpg(child, hostname) + (intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers, dhcpv6_servers) = parse_dpg(child, hostname) elif child.tag == str(QName(ns, "CpgDec")): (bgp_sessions, bgp_asn, bgp_peers_with_range) = parse_cpg(child, hostname) elif child.tag == str(QName(ns, "PngDec")): @@ -643,7 +651,7 @@ def parse_xml(filename, hostname, asic_name=None): (syslog_servers, ntp_servers, mgmt_routes, deployment_id, resource_type) = parse_meta(child, hostname) else: if child.tag == str(QName(ns, "DpgDec")): - (intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers) = parse_dpg(child, asic_name) + (intfs, lo_intfs, mgmt_intf, vlans, pcs, acls, dhcp_servers, dhcpv6_servers) = parse_dpg(child, asic_name) host_lo_intfs = parse_host_loopback(child, hostname) elif child.tag == str(QName(ns, "CpgDec")): (bgp_sessions, bgp_asn, bgp_peers_with_range) = parse_cpg(child, asic_name) @@ -739,6 +747,7 @@ def parse_xml(filename, hostname, asic_name=None): results['minigraph_mgmt'] = get_mgmt_info(devices, mgmt_dev, mgmt_port) results['syslog_servers'] = syslog_servers results['dhcp_servers'] = dhcp_servers + results['dhcpv6_servers'] = dhcpv6_servers results['ntp_servers'] = ntp_servers results['forced_mgmt_routes'] = mgmt_routes results['deployment_id'] = deployment_id diff --git 
a/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py b/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py new file mode 100644 index 00000000000..9fc96bfecc5 --- /dev/null +++ b/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py @@ -0,0 +1,306 @@ +import ast +import subprocess + +# Packet Test Framework imports +import ptf +import ptf.packet as packet +import ptf.testutils as testutils +from ptf import config +from ptf.base_tests import BaseTest +from ptf.mask import Mask + +IPv6 = scapy.layers.inet6.IPv6 + +class DataplaneBaseTest(BaseTest): + def __init__(self): + BaseTest.__init__(self) + + def setUp(self): + self.dataplane = ptf.dataplane_instance + self.dataplane.flush() + if config["log_dir"] is not None: + filename = os.path.join(config["log_dir"], str(self)) + ".pcap" + self.dataplane.start_pcap(filename) + + def tearDown(self): + if config["log_dir"] is not None: + self.dataplane.stop_pcap() + +""" + This test simulates a new host booting up on the VLAN network of a ToR and + requesting an IPv6 address via DHCPv6. Setup is as follows: + - DHCP client is simulated by listening/sending on an interface connected to VLAN of ToR. + - DHCP server is simulated by listening/sending on injected PTF interfaces which link + ToR to leaves. This way we can listen for traffic sent from DHCP relay out to would-be DHCPv6 servers + + This test performs the following functionality: + 1.) Simulated client broadcasts a DHCPv6 SOLICIT message. + 2.) Verify DHCP relay running on ToR receives the DHCPv6 SOLICIT message and send a DHCPv6 RELAY-FORWARD + message encapsulating the client DHCPv6 SOLICIT message and relays it to all of its known DHCP servers. + 3.) Simulate DHCPv6 RELAY-REPLY message send from a DHCP server to the ToR encapsulating DHCPv6 ADVERTISE message. + 4.) Verify DHCP relay receives the DHCPv6 RELAY-REPLY message decapsulate it and forwards DHCPv6 ADVERTISE + message to our simulated client. + 5.) 
Simulated client broadcasts a DHCPv6 REQUEST message. + 6.) Verify DHCP relay running on ToR receives the DHCPv6 REQUEST message and send a DHCPv6 RELAY-FORWARD + message encapsulating the client DHCPv6 REQUEST message and relays it to all of its known DHCP servers. + 7.) Simulate DHCPv6 RELAY-REPLY message send from a DHCP server to the ToR encapsulating DHCPv6 REPLY message. + 8.) Verify DHCP relay receives the DHCPv6 RELAY-REPLY message decapsulate it and forwards DHCPv6 REPLY + message to our simulated client. + +""" + +class DHCPTest(DataplaneBaseTest): + + BROADCAST_MAC = '33:33:00:01:00:02' + BROADCAST_IP = 'ff02::1:2' + DHCP_CLIENT_PORT = 546 + DHCP_SERVER_PORT = 547 + + def __init__(self): + self.test_params = testutils.test_params_get() + self.client_port_index = int(self.test_params['client_port_index']) + self.client_link_local = self.generate_client_interace_ipv6_link_local_address(self.client_port_index) + + DataplaneBaseTest.__init__(self) + + def setUp(self): + DataplaneBaseTest.setUp(self) + self.hostname = self.test_params['hostname'] + + # These are the interfaces we are injected into that link to out leaf switches + self.server_port_indices = ast.literal_eval(self.test_params['leaf_port_indices']) + self.num_dhcp_servers = int(self.test_params['num_dhcp_servers']) + self.assertTrue(self.num_dhcp_servers > 0, + "Error: This test requires at least one DHCP server to be specified!") + + # We will simulate a responding DHCP server on the first interface in the provided set + self.server_ip = self.test_params['server_ip'] + + self.relay_iface_ip = self.test_params['relay_iface_ip'] + self.relay_iface_mac = self.test_params['relay_iface_mac'] + self.relay_link_local = self.test_params['relay_link_local'] + + self.vlan_ip = self.test_params['vlan_ip'] + + self.client_mac = self.dataplane.get_mac(0, self.client_port_index) + + def generate_client_interace_ipv6_link_local_address(self, client_port_index): + # Shutdown and startup the client interface to 
generate a proper IPv6 link-local address + command = "ifconfig eth{} down".format(client_port_index) + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + proc.communicate() + + command = "ifconfig eth{} up".format(client_port_index) + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + proc.communicate() + + command = "ip addr show eth{} | grep inet6 | grep 'scope link' | awk '{{print $2}}' | cut -d '/' -f1".format(client_port_index) + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + stdout, stderr = proc.communicate() + + return stdout.strip() + + def tearDown(self): + DataplaneBaseTest.tearDown(self) + + + """ + Packet generation functions/wrappers + + """ + + def create_dhcp_solicit_packet(self): + + solicit_packet = Ether(src=self.client_mac, dst=self.BROADCAST_MAC) + solicit_packet /= IPv6(src=self.client_link_local, dst=self.BROADCAST_IP) + solicit_packet /= UDP(sport=self.DHCP_CLIENT_PORT, dport=self.DHCP_SERVER_PORT) + solicit_packet /= DHCP6_Solicit(trid=12345) + + return solicit_packet + + def create_dhcp_solicit_relay_forward_packet(self): + + solicit_relay_forward_packet = Ether(src=self.relay_iface_mac) + solicit_relay_forward_packet /= IPv6() + solicit_relay_forward_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) + solicit_relay_forward_packet /= DHCP6_RelayForward(msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) + solicit_relay_forward_packet /= DHCP6OptRelayMsg() + solicit_relay_forward_packet /= DHCP6_Solicit(trid=12345) + + return solicit_relay_forward_packet + + def create_dhcp_advertise_packet(self): + + advertise_packet = Ether(src=self.relay_iface_mac, dst=self.client_mac) + advertise_packet /= IPv6(src=self.relay_link_local, dst=self.client_link_local) + advertise_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_CLIENT_PORT) + advertise_packet /= DHCP6_Advertise(trid=12345) + + return advertise_packet + + def 
create_dhcp_advertise_relay_reply_packet(self): + + advertise_relay_reply_packet = Ether(dst=self.relay_iface_mac) + advertise_relay_reply_packet /= IPv6(src=self.server_ip, dst=self.relay_iface_ip) + advertise_relay_reply_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) + advertise_relay_reply_packet /= DHCP6_RelayReply(msgtype=13, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) + advertise_relay_reply_packet /= DHCP6OptRelayMsg() + advertise_relay_reply_packet /= DHCP6_Advertise(trid=12345) + + return advertise_relay_reply_packet + + def create_dhcp_request_packet(self): + + request_packet = Ether(src=self.client_mac, dst=self.BROADCAST_MAC) + request_packet /= IPv6(src=self.client_link_local, dst=self.BROADCAST_IP) + request_packet /= UDP(sport=self.DHCP_CLIENT_PORT, dport=self.DHCP_SERVER_PORT) + request_packet /= DHCP6_Request(trid=12345) + + return request_packet + + def create_dhcp_request_relay_forward_packet(self): + + request_relay_forward_packet = Ether(src=self.relay_iface_mac) + request_relay_forward_packet /= IPv6() + request_relay_forward_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) + request_relay_forward_packet /= DHCP6_RelayForward(msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) + request_relay_forward_packet /= DHCP6OptRelayMsg() + request_relay_forward_packet /= DHCP6_Request(trid=12345) + + return request_relay_forward_packet + + def create_dhcp_reply_packet(self): + + reply_packet = Ether(src=self.relay_iface_mac, dst=self.client_mac) + reply_packet /= IPv6(src=self.relay_link_local, dst=self.client_link_local) + reply_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_CLIENT_PORT) + reply_packet /= DHCP6_Reply(trid=12345) + + return reply_packet + + def create_dhcp_reply_relay_reply_packet(self): + + reply_relay_reply_packet = Ether(dst=self.relay_iface_mac) + reply_relay_reply_packet /= IPv6(src=self.server_ip, dst=self.relay_iface_ip) + 
reply_relay_reply_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) + reply_relay_reply_packet /= DHCP6_RelayReply(msgtype=13, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) + reply_relay_reply_packet /= DHCP6OptRelayMsg() + reply_relay_reply_packet /= DHCP6_Reply(trid=12345) + + return reply_relay_reply_packet + + + """ + Send/receive functions + + """ + + # Simulate client connecting on VLAN and broadcasting a DHCPv6 SOLICIT message + def client_send_solicit(self): + # Form and send DHCPv6 SOLICIT packet + solicit_packet = self.create_dhcp_solicit_packet() + testutils.send_packet(self, self.client_port_index, solicit_packet) + + # Verify that the DHCP relay actually received and relayed the DHCPv6 SOLICIT message to all of + # its known DHCP servers. + def verify_relayed_solicit_relay_forward(self): + # Create a packet resembling a DHCPv6 RELAY-FORWARD encapsulating SOLICIT packet + solicit_relay_forward_packet = self.create_dhcp_solicit_relay_forward_packet() + + # Mask off fields we don't care about matching + masked_packet = Mask(solicit_relay_forward_packet) + masked_packet.set_do_not_care_scapy(packet.Ether, "dst") + masked_packet.set_do_not_care_scapy(IPv6, "src") + masked_packet.set_do_not_care_scapy(IPv6, "dst") + masked_packet.set_do_not_care_scapy(IPv6, "fl") + masked_packet.set_do_not_care_scapy(IPv6, "tc") + masked_packet.set_do_not_care_scapy(IPv6, "plen") + masked_packet.set_do_not_care_scapy(IPv6, "nh") + masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") + masked_packet.set_do_not_care_scapy(packet.UDP, "len") + + # Count the number of these packets received on the ports connected to our leaves + solicit_count = testutils.count_matched_packets_all_ports(self, masked_packet, self.server_port_indices) + self.assertTrue(solicit_count >= 1, + "Failed: Solicit count of %d" % (solicit_count)) + + # Simulate a DHCP server sending a DHCPv6 RELAY-REPLY encapsulating ADVERTISE packet message to client. 
+ # We do this by injecting a RELAY-REPLY encapsulating ADVERTISE message on the link connected to one + # of our leaf switches. + def server_send_advertise_relay_reply(self): + # Form and send DHCPv6 RELAY-REPLY encapsulating ADVERTISE packet + advertise_relay_reply_packet = self.create_dhcp_advertise_relay_reply_packet() + testutils.send_packet(self, self.server_port_indices[0], advertise_relay_reply_packet) + + # Verify that the DHCPv6 ADVERTISE would be received by our simulated client + def verify_relayed_advertise(self): + # Create a packet resembling a DHCPv6 ADVERTISE packet + advertise_packet = self.create_dhcp_advertise_packet() + + # Mask off fields we don't care about matching + masked_packet = Mask(advertise_packet) + masked_packet.set_do_not_care_scapy(IPv6, "fl") + masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") + masked_packet.set_do_not_care_scapy(packet.UDP, "len") + + # NOTE: verify_packet() will fail for us via an assert, so no need to check a return value here + testutils.verify_packet(self, masked_packet, self.client_port_index) + + # Simulate our client sending a DHCPv6 REQUEST message + def client_send_request(self): + # Form and send DHCPv6 REQUEST packet + request_packet = self.create_dhcp_request_packet() + testutils.send_packet(self, self.client_port_index, request_packet) + + # Verify that the DHCP relay actually received and relayed the DHCPv6 REQUEST message to all of + # its known DHCP servers. 
+ def verify_relayed_request_relay_forward(self): + # Create a packet resembling a DHCPv6 RELAY-FORWARD encapsulating REQUEST packet + request_relay_forward_packet = self.create_dhcp_request_relay_forward_packet() + + # Mask off fields we don't care about matching + masked_packet = Mask(request_relay_forward_packet) + masked_packet.set_do_not_care_scapy(packet.Ether, "dst") + masked_packet.set_do_not_care_scapy(IPv6, "src") + masked_packet.set_do_not_care_scapy(IPv6, "dst") + masked_packet.set_do_not_care_scapy(IPv6, "fl") + masked_packet.set_do_not_care_scapy(IPv6, "tc") + masked_packet.set_do_not_care_scapy(IPv6, "plen") + masked_packet.set_do_not_care_scapy(IPv6, "nh") + masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") + masked_packet.set_do_not_care_scapy(packet.UDP, "len") + + # Count the number of these packets received on the ports connected to our leaves + request_count = testutils.count_matched_packets_all_ports(self, masked_packet, self.server_port_indices) + self.assertTrue(request_count >= 1, + "Failed: Request count of %d" % (request_count)) + + # Simulate a DHCP server sending a DHCPv6 RELAY-REPLY encapsulating REPLY packet message to client. 
+ def server_send_reply_relay_reply(self): + # Form and send DHCPv6 RELAY-REPLY encapsulating REPLY packet + reply_relay_reply_packet = self.create_dhcp_reply_relay_reply_packet() + testutils.send_packet(self, self.server_port_indices[0], reply_relay_reply_packet) + + # Verify that the DHCPv6 REPLY would be received by our simulated client + def verify_relayed_reply(self): + # Create a packet resembling a DHCPv6 REPLY packet + reply_packet = self.create_dhcp_reply_packet() + + # Mask off fields we don't care about matching + masked_packet = Mask(reply_packet) + masked_packet.set_do_not_care_scapy(IPv6, "fl") + masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") + masked_packet.set_do_not_care_scapy(packet.UDP, "len") + + # NOTE: verify_packet() will fail for us via an assert, so no need to check a return value here + testutils.verify_packet(self, masked_packet, self.client_port_index) + + def runTest(self): + self.client_send_solicit() + self.verify_relayed_solicit_relay_forward() + self.server_send_advertise_relay_reply() + self.verify_relayed_advertise() + self.client_send_request() + self.verify_relayed_request_relay_forward() + self.server_send_reply_relay_reply() + self.verify_relayed_reply() diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2 index 4af2f5ff7df..92f606234d0 100644 --- a/ansible/templates/minigraph_dpg.j2 +++ b/ansible/templates/minigraph_dpg.j2 @@ -130,6 +130,10 @@ {% endif %} {% set dhcp_servers_str=';'.join(dhcp_servers) %} {{ dhcp_servers_str }} +{% if dhcpv6_servers is defined %} +{% set dhcpv6_servers_str=';'.join(dhcpv6_servers) %} + {{ dhcpv6_servers_str }} +{% endif %} {{ vlan_param['id'] }} {{ vlan_param['tag'] }} {{ vlan_param['prefix'] | ipaddr('network') }}/{{ vlan_param['prefix'] | ipaddr('prefix') }} diff --git a/tests/dhcp_relay/test_dhcpv6_relay.py b/tests/dhcp_relay/test_dhcpv6_relay.py new file mode 100644 index 00000000000..e699bfffdb0 --- /dev/null +++ 
b/tests/dhcp_relay/test_dhcpv6_relay.py @@ -0,0 +1,214 @@ +import ipaddress +import pytest +import random +import time +import netaddr + +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.ptf_runner import ptf_runner + +pytestmark = [ + pytest.mark.topology('t0'), + pytest.mark.device_type('vs') +] + +@pytest.fixture(scope="module") +def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): + """ Fixture which returns a list of dictionaries where each dictionary contains + data necessary to test one instance of a DHCP relay agent running on the DuT. + This fixture is scoped to the module, as the data it gathers can be used by + all tests in this module. It does not need to be run before each test. + """ + duthost = duthosts[rand_one_dut_hostname] + dhcp_relay_data_list = [] + uplink_interface_link_local = "" + + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + + # SONiC spawns one DHCP relay agent per VLAN interface configured on the DUT + vlan_dict = mg_facts['minigraph_vlans'] + for vlan_iface_name, vlan_info_dict in vlan_dict.items(): + # Gather information about the downlink VLAN interface this relay agent is listening on + downlink_vlan_iface = {} + downlink_vlan_iface['name'] = vlan_iface_name + + for vlan_interface_info_dict in mg_facts['minigraph_vlan_interfaces']: + if (vlan_interface_info_dict['attachto'] == vlan_iface_name) and (netaddr.IPAddress(str(vlan_interface_info_dict['addr'])).version == 6): + downlink_vlan_iface['addr'] = vlan_interface_info_dict['addr'] + downlink_vlan_iface['mask'] = vlan_interface_info_dict['mask'] + break + + # Obtain MAC address of the VLAN interface + res = duthost.shell('cat /sys/class/net/{}/address'.format(vlan_iface_name)) + downlink_vlan_iface['mac'] = res['stdout'] + + downlink_vlan_iface['dhcpv6_server_addrs'] = mg_facts['dhcpv6_servers'] 
+ + # We choose the physical interface where our DHCP client resides to be index of first interface in the VLAN + client_iface = {} + client_iface['name'] = vlan_info_dict['members'][0] + client_iface['alias'] = mg_facts['minigraph_port_name_to_alias_map'][client_iface['name']] + client_iface['port_idx'] = mg_facts['minigraph_ptf_indices'][client_iface['name']] + + # Obtain uplink port indicies for this DHCP relay agent + uplink_interfaces = [] + uplink_port_indices =[] + for iface_name, neighbor_info_dict in mg_facts['minigraph_neighbors'].items(): + if neighbor_info_dict['name'] in mg_facts['minigraph_devices']: + neighbor_device_info_dict = mg_facts['minigraph_devices'][neighbor_info_dict['name']] + if 'type' in neighbor_device_info_dict and neighbor_device_info_dict['type'] == 'LeafRouter': + # If this uplink's physical interface is a member of a portchannel interface, + # we record the name of the portchannel interface here, as this is the actual + # interface the DHCP relay will listen on. 
+ iface_is_portchannel_member = False + for portchannel_name, portchannel_info_dict in mg_facts['minigraph_portchannels'].items(): + if 'members' in portchannel_info_dict and iface_name in portchannel_info_dict['members']: + iface_is_portchannel_member = True + if portchannel_name not in uplink_interfaces: + uplink_interfaces.append(portchannel_name) + break + # If the uplink's physical interface is not a member of a portchannel, add it to our uplink interfaces list + if not iface_is_portchannel_member: + uplink_interfaces.append(iface_name) + uplink_port_indices.append(mg_facts['minigraph_ptf_indices'][iface_name]) + if uplink_interface_link_local == "": + command = "ip addr show {} | grep inet6 | grep 'scope link' | awk '{{print $2}}' | cut -d '/' -f1".format(uplink_interfaces[0]) + res = duthost.shell(command) + if res['stdout'] != "": + uplink_interface_link_local = res['stdout'] + + dhcp_relay_data = {} + dhcp_relay_data['downlink_vlan_iface'] = downlink_vlan_iface + dhcp_relay_data['client_iface'] = client_iface + dhcp_relay_data['uplink_interfaces'] = uplink_interfaces + dhcp_relay_data['uplink_port_indices'] = uplink_port_indices + dhcp_relay_data['uplink_interface_link_local'] = uplink_interface_link_local + + dhcp_relay_data_list.append(dhcp_relay_data) + + return dhcp_relay_data_list + + +@pytest.fixture(scope="module") +def validate_dut_routes_exist(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data): + """Fixture to valid a route to each DHCP server exist + """ + duthost = duthosts[rand_one_dut_hostname] + dhcp_servers = set() + for dhcp_relay in dut_dhcp_relay_data: + dhcp_servers |= set(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs']) + + for dhcp_server in dhcp_servers: + rtInfo = duthost.get_ip_route_info(ipaddress.ip_address(dhcp_server)) + assert len(rtInfo["nexthops"]) > 0, "Failed to find route to DHCP server '{0}'".format(dhcp_server) + + +def test_dhcp_relay_default(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp_relay_data, 
validate_dut_routes_exist): + """Test DHCP relay functionality on T0 topology. + For each DHCP relay agent running on the DuT, verify DHCP packets are relayed properly + """ + duthost = duthosts[rand_one_dut_hostname] + + for dhcp_relay in dut_dhcp_relay_data: + # Run the DHCP relay test on the PTF host + ptf_runner(ptfhost, + "ptftests", + "dhcpv6_relay_test.DHCPTest", + platform_dir="ptftests", + params={"hostname": duthost.hostname, + "client_port_index": dhcp_relay['client_iface']['port_idx'], + "leaf_port_indices": repr(dhcp_relay['uplink_port_indices']), + "num_dhcp_servers": len(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs']), + "server_ip": str(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs'][0]), + "relay_iface_ip": str(dhcp_relay['downlink_vlan_iface']['addr']), + "relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']), + "relay_link_local": str(dhcp_relay['uplink_interface_link_local']), + "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr'])}, + log_file="/tmp/dhcpv6_relay_test.DHCPTest.log") + + +def test_dhcp_relay_after_link_flap(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp_relay_data, validate_dut_routes_exist): + """Test DHCP relay functionality on T0 topology after uplinks flap + For each DHCP relay agent running on the DuT, with relay agent running, flap the uplinks, + then test whether the DHCP relay agent relays packets properly. 
+ """ + duthost = duthosts[rand_one_dut_hostname] + + for dhcp_relay in dut_dhcp_relay_data: + # Bring all uplink interfaces down + for iface in dhcp_relay['uplink_interfaces']: + duthost.shell('ifconfig {} down'.format(iface)) + + # Sleep a bit to ensure uplinks are down + time.sleep(20) + + # Bring all uplink interfaces back up + for iface in dhcp_relay['uplink_interfaces']: + duthost.shell('ifconfig {} up'.format(iface)) + + # Sleep a bit to ensure uplinks are up + time.sleep(20) + + # Run the DHCP relay test on the PTF host + ptf_runner(ptfhost, + "ptftests", + "dhcpv6_relay_test.DHCPTest", + platform_dir="ptftests", + params={"hostname": duthost.hostname, + "client_port_index": dhcp_relay['client_iface']['port_idx'], + "leaf_port_indices": repr(dhcp_relay['uplink_port_indices']), + "num_dhcp_servers": len(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs']), + "server_ip": str(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs'][0]), + "relay_iface_ip": str(dhcp_relay['downlink_vlan_iface']['addr']), + "relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']), + "relay_link_local": str(dhcp_relay['uplink_interface_link_local']), + "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr'])}, + log_file="/tmp/dhcpv6_relay_test.DHCPTest.log") + + +def test_dhcp_relay_start_with_uplinks_down(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp_relay_data, validate_dut_routes_exist): + """Test DHCP relay functionality on T0 topology when relay agent starts with uplinks down + For each DHCP relay agent running on the DuT, bring the uplinks down, then restart the + relay agent while the uplinks are still down. Then test whether the DHCP relay agent + relays packets properly. 
+ """ + duthost = duthosts[rand_one_dut_hostname] + + for dhcp_relay in dut_dhcp_relay_data: + # Bring all uplink interfaces down + for iface in dhcp_relay['uplink_interfaces']: + duthost.shell('ifconfig {} down'.format(iface)) + + # Sleep a bit to ensure uplinks are down + time.sleep(20) + + # Restart DHCP relay service on DUT + duthost.shell('systemctl restart dhcp_relay.service') + + # Sleep to give the DHCP relay container time to start up and + # allow the relay agent to begin listening on the down interfaces + time.sleep(40) + + # Bring all uplink interfaces back up + for iface in dhcp_relay['uplink_interfaces']: + duthost.shell('ifconfig {} up'.format(iface)) + + # Sleep a bit to ensure uplinks are up + time.sleep(20) + + # Run the DHCP relay test on the PTF host + ptf_runner(ptfhost, + "ptftests", + "dhcpv6_relay_test.DHCPTest", + platform_dir="ptftests", + params={"hostname": duthost.hostname, + "client_port_index": dhcp_relay['client_iface']['port_idx'], + "leaf_port_indices": repr(dhcp_relay['uplink_port_indices']), + "num_dhcp_servers": len(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs']), + "server_ip": str(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs'][0]), + "relay_iface_ip": str(dhcp_relay['downlink_vlan_iface']['addr']), + "relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']), + "relay_link_local": str(dhcp_relay['uplink_interface_link_local']), + "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr'])}, + log_file="/tmp/dhcpv6_relay_test.DHCPTest.log") diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh index 2790a490f88..69a2d9dc829 100755 --- a/tests/kvmtest.sh +++ b/tests/kvmtest.sh @@ -106,6 +106,7 @@ test_t0() { cacl/test_cacl_application.py \ cacl/test_cacl_function.py \ dhcp_relay/test_dhcp_relay.py \ + dhcp_relay/test_dhcpv6_relay.py \ lldp/test_lldp.py \ ntp/test_ntp.py \ pc/test_po_cleanup.py \ From d6ca7dd9419fcc907d52693d510ea10d096d817d Mon Sep 17 00:00:00 2001 From: ppikh 
<70200079+ppikh@users.noreply.github.com> Date: Wed, 4 Aug 2021 18:53:24 +0300 Subject: [PATCH 047/117] Allure report URL print moved to be at the end(as possible) of tests output (#3904) Description of PR Allure report URL print moved to be at the end(as possible) of tests output Summary: Allure report URL print moved to be at the end(as possible) of tests output Approach What is the motivation for this PR? It is better to find report URL at the end of test session output How did you do it? See code How did you verify/test it? Executed tests and checked allure report URL Signed-off-by: Petro Pikh --- tests/common/plugins/allure_server/__init__.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/common/plugins/allure_server/__init__.py b/tests/common/plugins/allure_server/__init__.py index c1c10649759..ab7fd05cd6f 100644 --- a/tests/common/plugins/allure_server/__init__.py +++ b/tests/common/plugins/allure_server/__init__.py @@ -8,6 +8,8 @@ logger = logging.getLogger() +ALLURE_REPORT_URL = 'allure_report_url' + def pytest_addoption(parser): """ @@ -45,7 +47,8 @@ def pytest_sessionfinish(session, exitstatus): try: allure_server_obj = AllureServer(allure_server_addr, allure_server_port, allure_report_dir, allure_server_project_id) - allure_server_obj.generate_allure_report() + report_url = allure_server_obj.generate_allure_report() + session.config.cache.set(ALLURE_REPORT_URL, report_url) except Exception as err: logger.error('Failed to upload allure report to server. Allure report not available. ' '\nError: {}'.format(err)) @@ -53,6 +56,14 @@ def pytest_sessionfinish(session, exitstatus): logger.error('PyTest argument "--alluredir" not provided. 
Impossible to generate Allure report') +def pytest_terminal_summary(terminalreporter, exitstatus, config): + report_url = config.cache.get(ALLURE_REPORT_URL, None) + if report_url: + logger.info('Allure report URL: {}'.format(report_url)) + else: + logger.error('Can not get Allure report URL. Please check logs') + + def get_setup_session_info(session): ansible_dir = get_ansible_path(session) testbed = session.config.option.testbed @@ -118,8 +129,9 @@ def generate_allure_report(self): """ self.create_project_on_allure_server() self.upload_results_to_allure_server() - self.generate_report_on_allure_server() + report_url = self.generate_report_on_allure_server() self.clean_results_on_allure_server() + return report_url def create_project_on_allure_server(self): """ @@ -184,7 +196,7 @@ def generate_report_on_allure_server(self): logger.error('Failed to generate report on allure server, error: {}'.format(response.content)) else: report_url = response.json()['data']['report_url'] - logger.info('Allure report URL: {}'.format(report_url)) + return report_url def clean_results_on_allure_server(self): """ From ac8c2d16c408ed915a6ae00f74759f0a58a0f480 Mon Sep 17 00:00:00 2001 From: AntonHryshchuk <76687950+AntonHryshchuk@users.noreply.github.com> Date: Wed, 4 Aug 2021 19:19:57 +0300 Subject: [PATCH 048/117] [mgmtvrf] curl test: prevent http server tumeout (#3802) Description of PR Summary: The test_curl starting temp_http_server.py script on ptf with timeout of 60 seconds. In this time the test creating cli command and sending request to the http server. But cli creation takes up to 80 seconds. The change provide another way to get kernel version and reducing the time of cli creation to ~1 second only. Approach What is the motivation for this PR? Bug fix How did you verify/test it? 
Run mvrf/test_mgmtvrf.py::TestMvrfOutbound::test_curl Or run full test mvrf/test_mgmtvrf.py Signed-off-by: Anton --- tests/mvrf/test_mgmtvrf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/mvrf/test_mgmtvrf.py b/tests/mvrf/test_mgmtvrf.py index f71e272e7a7..ab18fc0954c 100644 --- a/tests/mvrf/test_mgmtvrf.py +++ b/tests/mvrf/test_mgmtvrf.py @@ -92,8 +92,8 @@ def execute_dut_command(duthost, command, mvrf=True, ignore_errors=False): result = {} prefix = "" if mvrf: - dut_kernel = duthost.setup()['ansible_facts']['ansible_kernel'].split('-') - if parse_version(dut_kernel[0]) > parse_version("4.9.0"): + dut_kernel = duthost.shell("cat /proc/version | awk '{ print $3 }' | cut -d '-' -f 1")["stdout"] + if parse_version(dut_kernel) > parse_version("4.9.0"): prefix = "sudo ip vrf exec mgmt " else: prefix = "sudo cgexec -g l3mdev:mgmt " From b4a0b630fb842b7987000a376ac7138ff0dc7044 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Wed, 4 Aug 2021 10:59:53 -0700 Subject: [PATCH 049/117] [pfc] Add support for backend topology run (#3914) Signed-off-by: Neetha John This PR contains the following changes - Extend the pfc pause testcase to support backend topology - Correct the topology marker to T0 since the testcase currently only supports that (Was modified in Changes to run QOS testcase on 7260 64x100 with t1-64-lag topology #3713) - Use the mac address from the ptf interfaces since they are already unique Summary: Fixes #3869 How did you verify/test it? 
Ran the test with the changes on 't0' and 't0-backend' topology and it passed --- .../test/files/ptftests/pfc_pause_test.py | 153 +++++++++++------- tests/qos/qos_fixtures.py | 13 +- tests/qos/qos_helpers.py | 22 ++- tests/qos/test_pfc_pause.py | 50 +++--- 4 files changed, 152 insertions(+), 86 deletions(-) diff --git a/ansible/roles/test/files/ptftests/pfc_pause_test.py b/ansible/roles/test/files/ptftests/pfc_pause_test.py index ea41dfb22d3..e4431558886 100755 --- a/ansible/roles/test/files/ptftests/pfc_pause_test.py +++ b/ansible/roles/test/files/ptftests/pfc_pause_test.py @@ -55,12 +55,12 @@ def __init__(self): def setUp(self): add_filter(udp_filter) self.dataplane = ptf.dataplane_instance - self.mac_src = self.test_params['mac_src'] - self.mac_dst = self.test_params['mac_dst'] self.pkt_count = int(self.test_params['pkt_count']) self.pkt_intvl = float(self.test_params['pkt_intvl']) self.port_src = int(self.test_params['port_src']) self.port_dst = self.test_params['port_dst'] + self.mac_src = self.dataplane.get_mac(0, self.port_src) + self.mac_dst = self.dataplane.get_mac(0, self.port_dst) self.ip_src = self.test_params['ip_src'] self.ip_dst = self.test_params['ip_dst'] self.dscp = self.test_params['dscp'] @@ -70,11 +70,97 @@ def setUp(self): """ if DUT has MAC information """ self.dut_has_mac = self.test_params['dut_has_mac'] self.debug = self.test_params.get('debug', False) + self.vlan_id = self.test_params.get('vlan_id', None) + self.testbed_type = self.test_params.get('testbed_type', None) - def runTest(self): - pass_cnt = 0 + def construct_pkt(self, sport, dport): tos = self.dscp<<2 tos_bg = self.dscp_bg<<2 + + pkt_args = { + 'eth_dst': self.mac_dst, + 'eth_src': self.mac_src, + 'ip_src': self.ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos, + 'udp_sport': sport, + 'udp_dport': dport, + 'ip_ttl': 64 + } + if self.vlan_id is not None: + pkt_args['dl_vlan_enable'] = True + pkt_args['vlan_vid'] = int(self.vlan_id) + pkt_args['vlan_pcp'] = self.dscp + pkt = 
simple_udp_packet(**pkt_args) + + pkt_bg_args = { + 'eth_dst': self.mac_dst, + 'eth_src': self.mac_src, + 'ip_src': self.ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos_bg, + 'udp_sport': sport, + 'udp_dport': dport, + 'ip_ttl': 64 + } + if self.vlan_id is not None: + pkt_bg_args['dl_vlan_enable'] = True + pkt_bg_args['vlan_vid'] = int(self.vlan_id) + pkt_bg_args['vlan_pcp'] = self.dscp_bg + pkt_bg = simple_udp_packet(**pkt_bg_args) + + exp_pkt_args = { + 'ip_src': self.ip_src, + 'ip_dst': self.ip_dst, + 'ip_tos': tos_bg, + 'udp_sport': sport, + 'udp_dport': dport, + 'ip_ttl': 63 + } + if self.vlan_id is not None: + exp_pkt_args['dl_vlan_enable'] = True + exp_pkt_args['vlan_vid'] = int(self.vlan_id) + exp_pkt_args['vlan_pcp'] = self.dscp_bg + exp_pkt = simple_udp_packet(**exp_pkt_args) + + masked_exp_pkt = Mask(exp_pkt) + masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") + masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "tos") + if 'backend' in self.testbed_type: + masked_exp_pkt.set_do_not_care_scapy(scapy.Dot1Q, "prio") + + return pkt, pkt_bg, masked_exp_pkt + + def populate_fdb(self): + pkt_args = { + 'eth_dst': self.mac_dst, + 'eth_src': self.mac_src, + 'ip_src': self.ip_src, + 'ip_dst': self.ip_dst + } + if self.vlan_id is not None: + pkt_args['dl_vlan_enable'] = True + pkt_args['vlan_vid'] = int(self.vlan_id) + pkt_args['vlan_pcp'] = 0 + + pkt = simple_udp_packet(**pkt_args) + + send_packet(self, self.port_src, pkt, 5) + + pkt_args['eth_dst'] = self.mac_src + pkt_args['eth_src'] = self.mac_dst + pkt_args['ip_src'] = self.ip_dst + pkt_args['ip_dst'] = self.ip_src + + pkt = simple_udp_packet(**pkt_args) + + send_packet(self, self.port_dst, pkt, 5) + + def runTest(self): + pass_cnt = 0 if self.debug: # remove previous debug files files = 
glob.glob("/tmp/pfc_pause_{}*".format(self.dscp)) @@ -82,64 +168,15 @@ def runTest(self): os.remove(file) current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") log_file = open("/tmp/pfc_pause_{}_{}".format(self.dscp, current_time), "w") - """ If DUT needs to learn MAC addresses """ if not self.dut_has_mac: - pkt = simple_udp_packet( - eth_dst=self.mac_dst, - eth_src=self.mac_src, - ip_src=self.ip_src, - ip_dst=self.ip_dst) - - send_packet(self, self.port_src, pkt, 5) - - pkt = simple_udp_packet( - eth_dst=self.mac_src, - eth_src=self.mac_dst, - ip_src=self.ip_dst, - ip_dst=self.ip_src) - - send_packet(self, self.port_dst, pkt, 5) + self.populate_fdb() for x in range(self.pkt_count): sport = random.randint(0, 65535) dport = random.randint(0, 65535) - pkt = simple_udp_packet( - eth_dst=self.mac_dst, - eth_src=self.mac_src, - ip_src=self.ip_src, - ip_dst=self.ip_dst, - ip_tos=tos, - udp_sport=sport, - udp_dport=dport, - ip_ttl=64) - - pkt_bg = simple_udp_packet( - eth_dst=self.mac_dst, - eth_src=self.mac_src, - ip_src=self.ip_src, - ip_dst=self.ip_dst, - ip_tos=tos_bg, - udp_sport=sport, - udp_dport=dport, - ip_ttl=64) - - exp_pkt = simple_udp_packet( - ip_src=self.ip_src, - ip_dst=self.ip_dst, - ip_tos=tos_bg, - udp_sport=sport, - udp_dport=dport, - ip_ttl=63) - - masked_exp_pkt = Mask(exp_pkt) - masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") - masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "tos") - + pkt, pkt_bg, masked_exp_pkt = self.construct_pkt(sport, dport) send_packet(self, self.port_src, pkt, 1) send_packet(self, self.port_src, pkt_bg, 1) @@ -154,7 +191,11 @@ def runTest(self): """ If the queue is paused, we should only receive the background packet """ if self.queue_paused: - pass_cnt += int(len(pkts) == 1 and scapy.Ether(pkts[0])[scapy.IP].tos == tos_bg) + if 
'backend' in self.testbed_type: + filter_expr = (hex(scapy.Ether(pkts[0]).type) == '0x8100' and int(scapy.Ether(pkts[0])[scapy.Dot1Q].prio) == self.dscp_bg and int(scapy.Ether(pkts[0])[scapy.Dot1Q].vlan) == self.vlan_id) + else: + filter_expr = (scapy.Ether(pkts[0])[scapy.IP].tos == tos_bg) + pass_cnt += int(len(pkts) == 1 and filter_expr) else: pass_cnt += int(len(pkts) == 2) diff --git a/tests/qos/qos_fixtures.py b/tests/qos/qos_fixtures.py index 54096abb892..dbb7f7752d7 100644 --- a/tests/qos/qos_fixtures.py +++ b/tests/qos/qos_fixtures.py @@ -17,19 +17,22 @@ def lossless_prio_dscp_map(duthosts, rand_one_dut_hostname): return None lossless_priorities = [int(x) for x in port_qos_map[intf]['pfc_enable'].split(',')] - dscp_to_tc_map = config_facts["DSCP_TO_TC_MAP"] + if "DSCP_TO_TC_MAP" in config_facts: + prio_to_tc_map = config_facts["DSCP_TO_TC_MAP"] + elif "DOT1P_TO_TC_MAP" in config_facts: + prio_to_tc_map = config_facts["DOT1P_TO_TC_MAP"] result = dict() for prio in lossless_priorities: result[prio] = list() - profile = dscp_to_tc_map.keys()[0] + profile = prio_to_tc_map.keys()[0] - for dscp in dscp_to_tc_map[profile]: - tc = dscp_to_tc_map[profile][dscp] + for prio in prio_to_tc_map[profile]: + tc = prio_to_tc_map[profile][prio] if int(tc) in lossless_priorities: - result[int(tc)].append(int(dscp)) + result[int(tc)].append(int(prio)) return result diff --git a/tests/qos/qos_helpers.py b/tests/qos/qos_helpers.py index 67d2fffdd9d..bf77096f3fb 100644 --- a/tests/qos/qos_helpers.py +++ b/tests/qos/qos_helpers.py @@ -42,7 +42,7 @@ def get_phy_intfs(host_ans): @return: Return the list of active interfaces """ intf_facts = host_ans.interface_facts()['ansible_facts']['ansible_interface_facts'] - phy_intfs = [k for k in intf_facts.keys() if k.startswith('Ethernet')] + phy_intfs = [k for k in intf_facts.keys() if k.startswith('Ethernet') and "." 
not in k] return phy_intfs def get_active_intfs(host_ans): @@ -124,12 +124,15 @@ def get_active_vlan_members(host_ans): """ Get all the Vlan memebrs """ vlan_intf = mg_vlans.keys()[0] vlan_members = mg_vlans[vlan_intf]['members'] + vlan_id = None + if 'type' in mg_vlans[vlan_intf] and mg_vlans[vlan_intf]['type'] is not None and 'Tagged' in mg_vlans[vlan_intf]['type']: + vlan_id = mg_vlans[vlan_intf]['vlanid'] """ Filter inactive Vlan members """ active_intfs = get_active_intfs(host_ans) vlan_members = [x for x in vlan_members if x in active_intfs] - return vlan_members + return vlan_members, vlan_id def get_vlan_subnet(host_ans): """ @@ -198,3 +201,18 @@ def setup_testbed(fanouthosts, ptfhost, leaf_fanouts): for peer_device in leaf_fanouts: peerdev_ans = fanouthosts[peer_device] stop_pause(peerdev_ans, PFC_GEN_FILE) + +def get_max_priority(testbed_type): + """ + Returns the maximum priority supported by a testbed type + + Args: + testbed_type(string): testbed topology + + Returns: + max_prio(string): Maximum priority that is applicable based on testbed type + """ + if 'backend' in testbed_type: + return 8 + else: + return 64 diff --git a/tests/qos/test_pfc_pause.py b/tests/qos/test_pfc_pause.py index a6df03d9b72..dc846eef11a 100644 --- a/tests/qos/test_pfc_pause.py +++ b/tests/qos/test_pfc_pause.py @@ -1,23 +1,25 @@ import logging import os import pytest -import random import time from qos_fixtures import lossless_prio_dscp_map -from qos_helpers import ansible_stdout_to_str, get_phy_intfs, get_addrs_in_subnet, get_active_vlan_members, get_vlan_subnet, natural_keys +from qos_helpers import ansible_stdout_to_str, get_phy_intfs, get_addrs_in_subnet, get_active_vlan_members, get_vlan_subnet, natural_keys, get_max_priority from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import 
change_mac_addresses # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode from tests.common.helpers.assertions import pytest_assert from tests.common.helpers.pfc_storm import PFCStorm pytestmark = [ - pytest.mark.topology('any') + pytest.mark.topology('t0') ] logger = logging.getLogger(__name__) +PTF_PORT_MAPPING_MODE = 'use_orig_interface' + PFC_PKT_COUNT = 1000000000 PTF_FILE_REMOTE_PATH = '~/ptftests/pfc_pause_test.py' @@ -29,7 +31,7 @@ MAX_TEST_INTFS_COUNT = 4 @pytest.fixture(scope="module", autouse=True) -def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo): +def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo, ptfhost): """ Generate configurations for the tests @@ -44,7 +46,7 @@ def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo): """ Get all the active physical interfaces enslaved to the Vlan """ """ These interfaces are actually server-faced interfaces at T0 """ duthost = duthosts[rand_one_dut_hostname] - vlan_members = get_active_vlan_members(duthost) + vlan_members, vlan_id = get_active_vlan_members(duthost) """ Get Vlan subnet """ vlan_subnet = get_vlan_subnet(duthost) @@ -52,9 +54,6 @@ def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo): """ Generate IP addresses for servers in the Vlan """ vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(vlan_members)) - """ Generate MAC addresses 00:00:00:00:00:XX for servers in the Vlan """ - vlan_mac_addrs = [5 * '00:' + format(k, '02x') for k in random.sample(range(1, 256), len(vlan_members))] - """ Find correspoinding interfaces on PTF """ phy_intfs = get_phy_intfs(duthost) phy_intfs.sort(key=natural_keys) @@ -62,16 +61,23 @@ def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo): vlan_members_index = [phy_intfs.index(intf) for intf in vlan_members] ptf_intfs = ['eth' + str(i) for i in vlan_members_index] + duthost.command('sonic-clear fdb all') + """ Disable DUT's PFC wd """ duthost.shell('sudo pfcwd stop') + testbed_type = 
tbinfo['topo']['name'] + yield { 'vlan_members': vlan_members, + 'vlan_id': vlan_id, 'ptf_intfs': ptf_intfs, 'vlan_ip_addrs': vlan_ip_addrs, - 'vlan_mac_addrs': vlan_mac_addrs + 'testbed_type': testbed_type } + duthost.command('sonic-clear fdb all') + """ Enable DUT's PFC wd """ if 'dualtor' not in tbinfo['topo']['name']: duthost.shell('sudo pfcwd start_default') @@ -101,10 +107,11 @@ def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, """ setup = pfc_test_setup + testbed_type = setup['testbed_type'] dut_intfs = setup['vlan_members'] + vlan_id = setup['vlan_id'] ptf_intfs = setup['ptf_intfs'] ptf_ip_addrs = setup['vlan_ip_addrs'] - ptf_mac_addrs = setup['vlan_mac_addrs'] """ Clear DUT's PFC counters """ duthost.sonic_pfc_counters(method="clear") @@ -123,16 +130,9 @@ def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, src_ip = ptf_ip_addrs[src_index] dst_ip = ptf_ip_addrs[dst_index] - src_mac = ptf_mac_addrs[src_index] - dst_mac = ptf_mac_addrs[dst_index] - """ DUT interface to pause """ dut_intf_paused = dut_intfs[dst_index] - """ Clear MAC table in DUT """ - duthost.shell('sonic-clear fdb all') - time.sleep(2) - if send_pause: peer_device = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerdevice'] peer_port = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerport'] @@ -161,9 +161,7 @@ def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, logger.info("Running test: src intf: {} dest intf: {}".format(dut_intfs[src_index], dut_intfs[dst_index])) intf_info = '--interface %d@%s --interface %d@%s' % (src_index, src_intf, dst_index, dst_intf) - test_params = ("mac_src=\'%s\';" % src_mac - + "mac_dst=\'%s\';" % dst_mac - + "ip_src=\'%s\';" % src_ip + test_params = ("ip_src=\'%s\';" % src_ip + "ip_dst=\'%s\';" % dst_ip + "dscp=%d;" % traffic_params['dscp'] + "dscp_bg=%d;" % traffic_params['dscp_bg'] @@ -172,7 +170,9 @@ def run_test(pfc_test_setup, 
fanouthosts, duthost, ptfhost, conn_graph_facts, + "port_src=%d;" % src_index + "port_dst=%d;" % dst_index + "queue_paused=%s;" % queue_paused - + "dut_has_mac=False") + + "dut_has_mac=False;" + + "vlan_id=%s;" % vlan_id + + "testbed_type=\'%s\'" % testbed_type) cmd = 'ptf --test-dir %s pfc_pause_test %s --test-params="%s"' % (os.path.dirname(PTF_FILE_REMOTE_PATH), intf_info, test_params) print cmd @@ -223,6 +223,7 @@ def test_pfc_pause_lossless(pfc_test_setup, fanouthosts, duthost, ptfhost, test_errors = "" errors = [] + setup = pfc_test_setup prio = int(enum_dut_lossless_prio.split('|')[-1]) dscp = lossless_prio_dscp_map[prio] other_lossless_prio = 4 if prio == 3 else 3 @@ -231,7 +232,8 @@ def test_pfc_pause_lossless(pfc_test_setup, fanouthosts, duthost, ptfhost, """ DSCP values for other lossless priority """ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio] """ DSCP values for lossy priorities """ - lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp)) + max_priority = get_max_priority(setup['testbed_type']) + lossy_dscps = list(set(range(max_priority)) - set(other_lossless_dscps) - set(dscp)) """ We also need to test some DSCP values for lossy priorities """ other_dscps = other_lossless_dscps + lossy_dscps[0:2] @@ -295,6 +297,7 @@ def test_no_pfc(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, test_errors = "" errors = [] + setup = pfc_test_setup prio = int(enum_dut_lossless_prio.split('|')[-1]) dscp = lossless_prio_dscp_map[prio] other_lossless_prio = 4 if prio == 3 else 3 @@ -302,7 +305,8 @@ def test_no_pfc(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, """ DSCP values for other lossless priority """ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio] """ DSCP values for lossy priorities """ - lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp)) + max_priority = get_max_priority(setup['testbed_type']) + lossy_dscps = list(set(range(max_priority)) - 
set(other_lossless_dscps) - set(dscp)) """ We also need to test some DSCP values for lossy priorities """ other_dscps = other_lossless_dscps + lossy_dscps[0:2] From 75c551b8629e5fd9cdd3b45a9114ed5c77ce0fe1 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Wed, 4 Aug 2021 11:03:20 -0700 Subject: [PATCH 050/117] [rdma] Skip warm reboot tests on Td2 (#3930) Warm reboot is not supported on Td2. Hence skipping those 'tgen' testcases Signed-off-by: Neetha John How did you verify/test it? ixia/pfc/test_pfc_pause_lossy.py::test_pfc_pause_single_lossy_prio_reboot[warm] SKIPPED [ 33%] ixia/pfc/test_pfc_pause_lossy.py::test_pfc_pause_single_lossy_prio_reboot[cold] PASSED [ 66%] ixia/pfc/test_pfc_pause_lossy.py::test_pfc_pause_single_lossy_prio_reboot[fast] PASSED [100%] =========================================================================== SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/common/helpers/assertions.py:13: Warm reboot is not supported on Td2 --- tests/ixia/__init__.py | 0 tests/ixia/files/__init__.py | 0 tests/ixia/files/helper.py | 17 +++++++++++++++++ tests/ixia/pfc/test_pfc_pause_lossless.py | 9 +++++++-- tests/ixia/pfc/test_pfc_pause_lossy.py | 9 +++++++-- tests/ixia/pfcwd/test_pfcwd_basic.py | 3 +++ 6 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 tests/ixia/__init__.py create mode 100644 tests/ixia/files/__init__.py create mode 100644 tests/ixia/files/helper.py diff --git a/tests/ixia/__init__.py b/tests/ixia/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/ixia/files/__init__.py b/tests/ixia/files/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/ixia/files/helper.py b/tests/ixia/files/helper.py new file mode 100644 index 00000000000..680a530840c --- /dev/null +++ b/tests/ixia/files/helper.py @@ -0,0 +1,17 @@ +from tests.common.broadcom_data import is_broadcom_device +from tests.common.helpers.assertions import pytest_require + +def skip_warm_reboot(duthost, 
reboot_type): + """ + Skip warm reboot tests for TD2 asics + + Args: + duthost (pytest fixture): device under test + reboot_type (string): type of reboot (can be warm, cold, fast) + + Returns: + None + """ + SKIP_LIST = ["td2"] + asic_type = duthost.get_asic_name() + pytest_require(not (is_broadcom_device(duthost) and asic_type in SKIP_LIST and "warm" in reboot_type), "Warm reboot is not supported on {}".format(asic_type)) diff --git a/tests/ixia/pfc/test_pfc_pause_lossless.py b/tests/ixia/pfc/test_pfc_pause_lossless.py index a86a95ba803..46d0b46493e 100644 --- a/tests/ixia/pfc/test_pfc_pause_lossless.py +++ b/tests/ixia/pfc/test_pfc_pause_lossless.py @@ -11,6 +11,7 @@ lossy_prio_list from tests.common.reboot import reboot from tests.common.utilities import wait_until +from tests.ixia.files.helper import skip_warm_reboot logger = logging.getLogger(__name__) @@ -166,8 +167,10 @@ def test_pfc_pause_single_lossless_prio_reboot(ixia_api, pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2, "Priority and port are not mapped to the expected DUT") - testbed_config, port_config_list = ixia_testbed_config duthost = duthosts[rand_one_dut_hostname] + skip_warm_reboot(duthost, reboot_type) + + testbed_config, port_config_list = ixia_testbed_config lossless_prio = int(lossless_prio) pause_prio_list = [lossless_prio] @@ -234,8 +237,10 @@ def test_pfc_pause_multi_lossless_prio_reboot(ixia_api, pytest_require(rand_one_dut_hostname == dut_hostname, "Port is not mapped to the expected DUT") - testbed_config, port_config_list = ixia_testbed_config duthost = duthosts[rand_one_dut_hostname] + skip_warm_reboot(duthost, reboot_type) + + testbed_config, port_config_list = ixia_testbed_config pause_prio_list = lossless_prio_list test_prio_list = lossless_prio_list bg_prio_list = lossy_prio_list diff --git a/tests/ixia/pfc/test_pfc_pause_lossy.py b/tests/ixia/pfc/test_pfc_pause_lossy.py index 3b8965f9640..136f300c940 100644 --- a/tests/ixia/pfc/test_pfc_pause_lossy.py +++ 
b/tests/ixia/pfc/test_pfc_pause_lossy.py @@ -11,6 +11,7 @@ lossy_prio_list from tests.common.reboot import reboot from tests.common.utilities import wait_until +from tests.ixia.files.helper import skip_warm_reboot logger = logging.getLogger(__name__) @@ -166,8 +167,10 @@ def test_pfc_pause_single_lossy_prio_reboot(ixia_api, pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2, "Priority and port are not mapped to the expected DUT") - testbed_config, port_config_list = ixia_testbed_config duthost = duthosts[rand_one_dut_hostname] + skip_warm_reboot(duthost, reboot_type) + + testbed_config, port_config_list = ixia_testbed_config lossy_prio = int(lossy_prio) pause_prio_list = [lossy_prio] @@ -234,8 +237,10 @@ def test_pfc_pause_multi_lossy_prio_reboot(ixia_api, pytest_require(rand_one_dut_hostname == dut_hostname, "Port is not mapped to the expected DUT") - testbed_config, port_config_list = ixia_testbed_config duthost = duthosts[rand_one_dut_hostname] + skip_warm_reboot(duthost, reboot_type) + + testbed_config, port_config_list = ixia_testbed_config pause_prio_list = lossy_prio_list test_prio_list = lossy_prio_list bg_prio_list = lossless_prio_list diff --git a/tests/ixia/pfcwd/test_pfcwd_basic.py b/tests/ixia/pfcwd/test_pfcwd_basic.py index db1c794680b..284a06c53fd 100644 --- a/tests/ixia/pfcwd/test_pfcwd_basic.py +++ b/tests/ixia/pfcwd/test_pfcwd_basic.py @@ -8,6 +8,7 @@ from tests.common.ixia.qos_fixtures import prio_dscp_map, lossless_prio_list from tests.common.reboot import reboot from tests.common.utilities import wait_until +from tests.ixia.files.helper import skip_warm_reboot from files.pfcwd_basic_helper import run_pfcwd_basic_test from files.helper import skip_pfcwd_test @@ -158,6 +159,7 @@ def test_pfcwd_basic_single_lossless_prio_reboot(ixia_api, duthost = duthosts[rand_one_dut_hostname] skip_pfcwd_test(duthost=duthost, trigger_pfcwd=trigger_pfcwd) + skip_warm_reboot(duthost, reboot_type) testbed_config, port_config_list = 
ixia_testbed_config lossless_prio = int(lossless_prio) @@ -221,6 +223,7 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(ixia_api, duthost = duthosts[rand_one_dut_hostname] skip_pfcwd_test(duthost=duthost, trigger_pfcwd=trigger_pfcwd) + skip_warm_reboot(duthost, reboot_type) testbed_config, port_config_list = ixia_testbed_config From 55261ca2761c97ffbea64f1fc537c82a96fbe154 Mon Sep 17 00:00:00 2001 From: AndoniSanguesa <31708881+AndoniSanguesa@users.noreply.github.com> Date: Wed, 4 Aug 2021 16:19:53 -0700 Subject: [PATCH 051/117] Extend cacl function test (#3895) Description of PR Added NTP capability to the existing Control Plane ACL functional test. Previously only SSH and NTP were supported. The docker container python dependencies defined by the sonic-buildimage repo does not include ntplib which is used by this addition. The parts of the test that use ntplib are disabled if the ntplib module is not present. Summary: Fixes #3597 Approach What is the motivation for this PR? There was no existing support for NTP in existing cacl function test. How did you do it? Configured NTP ACL table in the minigraph xml files. Modified existing cacl config file to configure the new NTP ACL table. Sent NTP requests with and without the CACL configured. Also made some changes to timeouts and added optional timeout argument to the snmp_facts ansible module. This change greatly improved test stability locally. How did you verify/test it? Ran tests multiple times with no errors. Manually checked that the CACL configuration was stopping communication on specified protocols via cli. 
Co-authored-by: Andoni Sanguesa --- ansible/library/snmp_facts.py | 3 +- ansible/minigraph/lab-a7260-01.t0-116.xml | 5 ++ ansible/minigraph/lab-s6000-01.t0.xml | 5 ++ ansible/minigraph/lab-s6100-01.t0-64.xml | 5 ++ ansible/minigraph/lab-s6100-01.t1-64-lag.xml | 5 ++ ansible/minigraph/lab-s6100-01.t1-64.xml | 5 ++ ansible/minigraph/str-msn2700-01.t0.xml | 5 ++ ansible/minigraph/str-msn2700-01.t1-lag.xml | 5 ++ ansible/minigraph/str-msn2700-01.t1.xml | 5 ++ ansible/minigraph/vlab-08.t1-8-lag.xml | 5 ++ .../test/files/helpers/config_service_acls.sh | 27 ++++++- ansible/templates/minigraph_dpg.j2 | 5 ++ docs/testplan/CACL-function-test-plan.md | 22 ++++++ tests/cacl/test_cacl_function.py | 76 +++++++++++++------ tests/common/helpers/snmp_helpers.py | 4 +- 15 files changed, 155 insertions(+), 27 deletions(-) create mode 100644 docs/testplan/CACL-function-test-plan.md diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py index 91903231c6f..2f93ebc8470 100644 --- a/ansible/library/snmp_facts.py +++ b/ansible/library/snmp_facts.py @@ -298,6 +298,7 @@ def main(): module = AnsibleModule( argument_spec=dict( host=dict(required=True), + timeout=dict(reqired=False, type='int', default=5), version=dict(required=True, choices=['v2', 'v2c', 'v3']), community=dict(required=False, default=False), username=dict(required=False), @@ -367,7 +368,7 @@ def main(): # (e.g. S6000) when cpu utilization is high, increse timeout to tolerate the delay. 
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=5.0), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), cmdgen.MibVariable(p.sysDescr,), ) diff --git a/ansible/minigraph/lab-a7260-01.t0-116.xml b/ansible/minigraph/lab-a7260-01.t0-116.xml index f5e8682a89c..188d60f69c1 100644 --- a/ansible/minigraph/lab-a7260-01.t0-116.xml +++ b/ansible/minigraph/lab-a7260-01.t0-116.xml @@ -284,6 +284,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/lab-s6000-01.t0.xml b/ansible/minigraph/lab-s6000-01.t0.xml index 8e93d8c810a..b9a4af663ef 100644 --- a/ansible/minigraph/lab-s6000-01.t0.xml +++ b/ansible/minigraph/lab-s6000-01.t0.xml @@ -284,6 +284,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/lab-s6100-01.t0-64.xml b/ansible/minigraph/lab-s6100-01.t0-64.xml index 32b1a4e7de6..5ed5afde3d4 100644 --- a/ansible/minigraph/lab-s6100-01.t0-64.xml +++ b/ansible/minigraph/lab-s6100-01.t0-64.xml @@ -284,6 +284,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/lab-s6100-01.t1-64-lag.xml b/ansible/minigraph/lab-s6100-01.t1-64-lag.xml index d6eb90ecf31..025bef62892 100644 --- a/ansible/minigraph/lab-s6100-01.t1-64-lag.xml +++ b/ansible/minigraph/lab-s6100-01.t1-64-lag.xml @@ -1145,6 +1145,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/lab-s6100-01.t1-64.xml b/ansible/minigraph/lab-s6100-01.t1-64.xml index d0a516eb474..7e606f07ffb 100644 --- a/ansible/minigraph/lab-s6100-01.t1-64.xml +++ b/ansible/minigraph/lab-s6100-01.t1-64.xml @@ -2625,6 +2625,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/str-msn2700-01.t0.xml b/ansible/minigraph/str-msn2700-01.t0.xml index 90cfd8542a5..c784dbd9ba6 100644 --- a/ansible/minigraph/str-msn2700-01.t0.xml +++ b/ansible/minigraph/str-msn2700-01.t0.xml @@ -284,6 +284,11 @@ + + NTP_ACL + NTP + NTP + 
SNMP_ACL SNMP diff --git a/ansible/minigraph/str-msn2700-01.t1-lag.xml b/ansible/minigraph/str-msn2700-01.t1-lag.xml index 0236078ac7b..d71472c4637 100644 --- a/ansible/minigraph/str-msn2700-01.t1-lag.xml +++ b/ansible/minigraph/str-msn2700-01.t1-lag.xml @@ -1065,6 +1065,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/str-msn2700-01.t1.xml b/ansible/minigraph/str-msn2700-01.t1.xml index b1b449378af..9a2e4de13f6 100644 --- a/ansible/minigraph/str-msn2700-01.t1.xml +++ b/ansible/minigraph/str-msn2700-01.t1.xml @@ -1345,6 +1345,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/minigraph/vlab-08.t1-8-lag.xml b/ansible/minigraph/vlab-08.t1-8-lag.xml index 1964edc8f78..da0c2c70402 100644 --- a/ansible/minigraph/vlab-08.t1-8-lag.xml +++ b/ansible/minigraph/vlab-08.t1-8-lag.xml @@ -547,6 +547,11 @@ + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/ansible/roles/test/files/helpers/config_service_acls.sh b/ansible/roles/test/files/helpers/config_service_acls.sh index c8c136efeb9..6c77f38c65a 100755 --- a/ansible/roles/test/files/helpers/config_service_acls.sh +++ b/ansible/roles/test/files/helpers/config_service_acls.sh @@ -65,6 +65,31 @@ cat << EOF > /tmp/testacl.json "config": { "name": "ssh-only" } + }, + "ntp-acl": { + "acl-entries": { + "acl-entry": { + "1": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 1 + }, + "ip": { + "config": { + "protocol": "IP_TCP", + "source-ip-address": "1.1.1.1/32" + } + } + } + } + }, + "config": { + "name": "ntp-acl" + } } } } @@ -80,7 +105,7 @@ logger -t cacltest "added cacl test rules" iptables -nL | logger -t cacltest # Sleep to allow Ansible playbook ample time to attempt to connect and timeout -sleep 60 +sleep 120 # Delete the test ACL config file rm -rf /tmp/testacl.json diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2 index 92f606234d0..821bec08dc0 100644 --- 
a/ansible/templates/minigraph_dpg.j2 +++ b/ansible/templates/minigraph_dpg.j2 @@ -191,6 +191,11 @@ {% if card_type is not defined or card_type != 'supervisor' %} + + NTP_ACL + NTP + NTP + SNMP_ACL SNMP diff --git a/docs/testplan/CACL-function-test-plan.md b/docs/testplan/CACL-function-test-plan.md new file mode 100644 index 00000000000..b55791d3d7d --- /dev/null +++ b/docs/testplan/CACL-function-test-plan.md @@ -0,0 +1,22 @@ +# Control Plane Access List Function Test Plan +- [Overview](#overview) +- [Test Procedure](#test-procedure) + +## Overview +This test aims to ensure that a configured Control Plane Access List (CACL) is able to DROP incoming packets from specified incoming IP addresses using a variety of protocols such as SSH, SNMP, and NTP. + +## Test Procedure + +1. Test that SNMP works initially, before CACL configuration +2. Send NTP request initially, before CACL configuration +3. Copy CACL config shell file to DUT and run file + - Once the CACL is configured, it stays active for 2 mintues before being automatically removed +4. Have localhost wait for the SSH port on the DUT to be stopped. Test will fail if the port doesn't stop + - This confirms that the CACL has been configured +5. Check that the SSH port is up expecting a `False` result +6. Send SNMP request expecting no response from the DUT +7. Send NTP request expecting an exception +8. Have localhost wait for CACL to be removed +9. Delete config file from the dut +10. Send SNMP request and ensure that a response is sent +11. 
Send NTP request and expect no exception \ No newline at end of file diff --git a/tests/cacl/test_cacl_function.py b/tests/cacl/test_cacl_function.py index 826248596de..d03e111ee7c 100644 --- a/tests/cacl/test_cacl_function.py +++ b/tests/cacl/test_cacl_function.py @@ -1,4 +1,13 @@ import pytest +import logging +from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.snmp_helpers import get_snmp_facts + +try: + import ntplib + NTPLIB_INSTALLED = True +except ImportError: + NTPLIB_INSTALLED = False from tests.common.helpers.snmp_helpers import get_snmp_facts @@ -13,16 +22,32 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): - """Test control plane ACL functionality on a SONiC device - """ + """Test control plane ACL functionality on a SONiC device""" + duthost = duthosts[rand_one_dut_hostname] dut_mgmt_ip = duthost.mgmt_ip - # Ensure we can gather basic SNMP facts from the device - res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity']) - - if 'ansible_facts' not in res: - pytest.fail("Failed to retrieve SNMP facts from DuT!") + # Start an NTP client + if NTPLIB_INSTALLED: + ntp_client = ntplib.NTPClient() + else: + logging.warning("Will not check NTP connection. ntplib is not installed.") + + # Ensure we can gather basic SNMP facts from the device. 
Should fail on timeout + get_snmp_facts(localhost, + host=dut_mgmt_ip, + version="v2c", + community=creds['snmp_rocommunity'], + wait=True, + timeout = 20, + interval=20) + + # Ensure we can send an NTP request + if NTPLIB_INSTALLED: + try: + ntp_client.request(dut_mgmt_ip) + except ntplib.NTPException: + pytest.fail("NTP did timed out when expected to succeed!") # Copy config_service_acls.sh to the DuT (this also implicitly verifies we can successfully SSH to the DuT) duthost.copy(src="scripts/config_service_acls.sh", dest="/tmp/config_service_acls.sh", mode="0755") @@ -40,12 +65,11 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): port=SONIC_SSH_PORT, state='stopped', search_regex=SONIC_SSH_REGEX, - delay=0, - timeout=20, + delay=30, + timeout=40, module_ignore_errors=True) - if res.is_failed: - pytest.fail("SSH port did not stop. {}".format(res.get('msg', ''))) + pytest_assert(not res.is_failed, "SSH port did not stop. {}".format(res.get('msg', ''))) # Try to SSH back into the DuT, it should time out res = localhost.wait_for(host=dut_mgmt_ip, @@ -56,16 +80,21 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): timeout=10, module_ignore_errors=True) - if not res.is_failed: - pytest.fail("SSH did not timeout when expected. {}".format(res.get('msg', ''))) + pytest_assert(res.is_failed, "SSH did not timeout when expected. 
{}".format(res.get('msg', ''))) # Ensure we CANNOT gather basic SNMP facts from the device res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], module_ignore_errors=True) - if 'ansible_facts' in res or "No SNMP response received before timeout" not in res.get('msg', ''): - pytest.fail("SNMP did not time out when expected") + pytest_assert('ansible_facts' not in res and "No SNMP response received before timeout" in res.get('msg', '')) + # Ensure we cannot send an NTP request to the DUT + if NTPLIB_INSTALLED: + try: + ntp_client.request(dut_mgmt_ip) + pytest.fail("NTP did not time out when expected") + except ntplib.NTPException: + pass # Wait until the original service ACLs are reinstated and the SSH port on the # DUT is open to us once again. Note that the timeout here should be set sufficiently @@ -79,15 +108,16 @@ def test_cacl_function(duthosts, rand_one_dut_hostname, localhost, creds): timeout=90, module_ignore_errors=True) - if res.is_failed: - pytest.fail("SSH did not start working when expected. {}".format(res.get('msg', ''))) + pytest_assert(not res.is_failed, "SSH did not start working when expected. {}".format(res.get('msg', ''))) # Delete config_service_acls.sh from the DuT duthost.file(path="/tmp/config_service_acls.sh", state="absent") - # Ensure we can gather basic SNMP facts from the device once again - res = get_snmp_facts(localhost, host=dut_mgmt_ip, version='v2c', community=creds['snmp_rocommunity'], - module_ignore_errors=True) - - if 'ansible_facts' not in res: - pytest.fail("Failed to retrieve SNMP facts from DuT!") + # Ensure we can gather basic SNMP facts from the device once again. 
Should fail on timeout + get_snmp_facts(localhost, + host=dut_mgmt_ip, + version="v2c", + community=creds['snmp_rocommunity'], + wait=True, + timeout = 20, + interval=20) diff --git a/tests/common/helpers/snmp_helpers.py b/tests/common/helpers/snmp_helpers.py index 6e404ba840a..85fb959abd7 100644 --- a/tests/common/helpers/snmp_helpers.py +++ b/tests/common/helpers/snmp_helpers.py @@ -29,13 +29,13 @@ def _update_snmp_facts(localhost, host, version, community, is_dell): def get_snmp_facts(localhost, host, version, community, is_dell=False, module_ignore_errors=False, - wait=False, timeouot=DEF_WAIT_TIMEOUT, interval=DEF_CHECK_INTERVAL): + wait=False, timeout=DEF_WAIT_TIMEOUT, interval=DEF_CHECK_INTERVAL): if not wait: return _get_snmp_facts(localhost, host, version, community, is_dell, module_ignore_errors) global global_snmp_facts - pytest_assert(wait_until(timeouot, interval, _update_snmp_facts, localhost, host, version, + pytest_assert(wait_until(timeout, interval, _update_snmp_facts, localhost, host, version, community, is_dell), "Timeout waiting for SNMP facts") return global_snmp_facts From 7b23b9469d4a90dae3d796127e6464545fbdcad1 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Thu, 5 Aug 2021 08:58:14 +0800 Subject: [PATCH 052/117] [test_hash] Enable storage backend topologies (#3943) What is the motivation for this PR? * Fix the error introduced by PR Update IP decap testing for dualtor #3702 that wipes out the changes in PR [test_fib] Enable backend topologies #3734 How did you do it? * add a new fixture fib_info_files_per_function that is used exclusively for tests in test_fib.py that requires fib info files per test function call. How did you verify/test it? 
--- tests/common/fixtures/fib_utils.py | 46 +++++++++++++++++++++++++++++- tests/fib/test_fib.py | 11 +++---- 2 files changed, 51 insertions(+), 6 deletions(-) diff --git a/tests/common/fixtures/fib_utils.py b/tests/common/fixtures/fib_utils.py index ebaad821248..f92b439a25e 100644 --- a/tests/common/fixtures/fib_utils.py +++ b/tests/common/fixtures/fib_utils.py @@ -115,6 +115,7 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): po = asic_cfg_facts.get('PORTCHANNEL', {}) ports = asic_cfg_facts.get('PORT', {}) + sub_interfaces = asic_cfg_facts.get('VLAN_SUB_INTERFACE', {}) with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp: fib = json.load(fp) @@ -135,7 +136,9 @@ def get_fib_info(duthost, dut_cfg_facts, duts_mg_facts): else: oports.append([str(duts_mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']]) else: - if ports.has_key(ifname): + if sub_interfaces.has_key(ifname): + oports.append([str(duts_mg_facts['minigraph_ptf_indices'][ifname.split('.')[0]])]) + elif ports.has_key(ifname): if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int': skip = True else: @@ -214,3 +217,44 @@ def fib_info_files(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_ files.append(filename) return files + + +@pytest.fixture(scope='function') +def fib_info_files_per_function(duthosts, ptfhost, duts_running_config_facts, duts_minigraph_facts, tbinfo, request): + """Get FIB info from database and store to text files on PTF host. + + For T2 topology, generate a single file to /root/fib_info_all_duts.txt to PTF host. + For other topologies, generate one file for each duthost. File name pattern: + /root/fib_info_dut.txt + + Args: + duthosts (DutHosts): Instance of DutHosts for interacting with DUT hosts. + ptfhost (PTFHost): Instance of PTFHost for interacting with the PTF host. + duts_running_config_facts (dict): Running config facts of all DUT hosts. + duts_minigraph_facts (dict): Minigraph facts of all DUT hosts. 
+ tbinfo (object): Instance of TestbedInfo. + + Returns: + list: List of FIB info file names on PTF host. + """ + duts_config_facts = duts_running_config_facts + testname = request.node.name + files = [] + if tbinfo['topo']['type'] != "t2": + for dut_index, duthost in enumerate(duthosts): + fib_info = get_fib_info(duthost, duts_config_facts[duthost.hostname], duts_minigraph_facts[duthost.hostname]) + if 'test_basic_fib' in testname and 'backend' in tbinfo['topo']['name']: + # if it is a storage backend topology(bt0 or bt1) and testcase is test_basic_fib + # add a default route as failover in the prefix matching + fib_info[u'0.0.0.0/0'] = [] + fib_info[u'::/0'] = [] + filename = '/root/fib_info_dut_{0}_{1}.txt'.format(testname, dut_index) + gen_fib_info_file(ptfhost, fib_info, filename) + files.append(filename) + else: + fib_info = get_t2_fib_info(duthosts, duts_config_facts, duts_minigraph_facts) + filename = '/root/fib_info_all_duts.txt' + gen_fib_info_file(ptfhost, fib_info, filename) + files.append(filename) + + return files diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index a3b8e502c0b..2f0abb23578 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,13 +10,14 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import ptf_test_port_map from tests.ptf_runner import ptf_runner from tests.common.helpers.assertions import pytest_assert from tests.common.dualtor.mux_simulator_control import mux_server_url from tests.common.utilities import is_ipv4_address -from tests.common.fixtures.fib_utils import fib_info_files +from tests.common.fixtures.fib_utils import 
fib_info_files_per_function from tests.common.utilities import wait logger = logging.getLogger(__name__) @@ -83,7 +84,7 @@ def set_mux_same_side(tbinfo, mux_server_url): @pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)]) def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, # set_mux_random, - fib_info_files, + fib_info_files_per_function, tbinfo, mux_server_url, router_macs, ignore_ttl, single_fib_for_duts): @@ -106,7 +107,7 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, "ptftests", "fib_test.FibTest", platform_dir="ptftests", - params={"fib_info_files": fib_info_files[:3], # Test at most 3 DUTs + params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url), "router_macs": router_macs, "ipv4": ipv4, @@ -268,7 +269,7 @@ def add_default_route_to_dut(duts_running_config_facts, duthosts, tbinfo): yield -def test_hash(add_default_route_to_dut, duthosts, fib_info_files, setup_vlan, hash_keys, ptfhost, ipver, +def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, setup_vlan, hash_keys, ptfhost, ipver, set_mux_same_side, tbinfo, mux_server_url, router_macs, ignore_ttl, single_fib_for_duts): @@ -289,7 +290,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files, setup_vlan, ha "ptftests", "hash_test.HashTest", platform_dir="ptftests", - params={"fib_info_files": fib_info_files[:3], # Test at most 3 DUTs + params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url), "hash_keys": hash_keys, "src_ip_range": ",".join(src_ip_range), From 032ca94b2b2da87f0c3dd7a7ea86beee261a33b8 Mon Sep 17 00:00:00 2001 From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com> Date: Wed, 4 Aug 2021 19:00:23 -0700 Subject: [PATCH 053/117] Increase timeout for sanity check (#3826) What is the 
motivation for this PR? On multi-asic VS testbed, due to the increased number of services running, we see sanity check not able to complete critical process check before the timeout. This could be a performance issue, but increasing the timeout should help ensure that sanity check is able to finish execution. How did you do it? Increase the timeout to wait for completion of check_processes_on_dut parallel runs. --- tests/common/plugins/sanity_check/checks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index 8030b5fc0b4..38156386cab 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -585,7 +585,13 @@ def _check_monit_on_dut(*args, **kwargs): @pytest.fixture(scope="module") def check_processes(duthosts): def _check(*args, **kwargs): - result = parallel_run(_check_processes_on_dut, args, kwargs, duthosts, timeout=600) + timeout = 600 + # Increase the timeout for multi-asic virtual switch DUT. + for node in duthosts.nodes: + if 'kvm' in node.sonichost.facts['platform'] and node.sonichost.is_multi_asic: + timeout = 1000 + break + result = parallel_run(_check_processes_on_dut, args, kwargs, duthosts, timeout=timeout) return result.values() @reset_ansible_local_tmp From 208ebd00bbb2a890f36db3980e7edff1213ce304 Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Thu, 5 Aug 2021 19:26:21 +0800 Subject: [PATCH 054/117] [platform_test/api] Enhance lpmode API test (#3828) - What is the motivation for this PR? To enhance the platform SFP low power mode API test case: Adding a function to judge whether the transceiver is supporting low power mode. Currently, it only judges whether the transceiver is an optical one, but not all optical transceivers support lpmode. Set lpmode needs some time to take effect, need to add some delay before checking the status after set. - How did you do it? 
Add a new function to judge whether the transceiver is supporting low power mode, if not, skip the transceiver in the test. the criteria like below: a. whether the transceiver is a QSFP module, SFP/SFP+ don't support lpmode. b. whether the transceiver's power class is higher than 'Power Class 1' Use wait_until to add some delay before checking the lpmode status after the set operation. - How did you verify/test it? Run test against various types of transceivers --- tests/platform_tests/api/test_sfp.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/platform_tests/api/test_sfp.py b/tests/platform_tests/api/test_sfp.py index fbf9c5a4c47..be1b7876906 100644 --- a/tests/platform_tests/api/test_sfp.py +++ b/tests/platform_tests/api/test_sfp.py @@ -8,6 +8,7 @@ from tests.common.helpers.platform_api import sfp from tests.common.utilities import skip_version from tests.common.platform.interface_utils import get_port_map +from tests.common.utilities import wait_until from platform_api_test_base import PlatformApiTestBase @@ -186,6 +187,14 @@ def is_xcvr_resettable(self, xcvr_info_dict): return False return True + def is_xcvr_support_lpmode(self, xcvr_info_dict): + """Returns True if transceiver is support low power mode, False if not supported""" + xcvr_type = xcvr_info_dict["type"] + ext_identifier = xcvr_info_dict["ext_identifier"] + if not "QSFP" in xcvr_type or "Power Class 1" in ext_identifier: + return False + return True + # # Functions to test methods inherited from DeviceBase class # @@ -459,6 +468,9 @@ def test_tx_disable_channel(self, duthosts, enum_rand_one_per_hwsku_hostname, lo self.expect(tx_disable_chan_mask == expected_mask, "Transceiver {} TX disabled channel data is incorrect".format(i)) self.assert_expectations() + def _check_lpmode_status(self, sfp,platform_api_conn, i, state): + return state == sfp.get_lpmode(platform_api_conn, i) + def test_lpmode(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, 
platform_api_conn): """This function tests both the get_lpmode() and set_lpmode() APIs""" for i in self.candidate_sfp: @@ -467,7 +479,7 @@ def test_lpmode(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, pla if not self.expect(info_dict is not None, "Unable to retrieve transceiver {} info".format(i)): continue - if not self.is_xcvr_optical(info_dict): + if not self.is_xcvr_support_lpmode(info_dict): logger.warning("test_lpmode: Skipping transceiver {} (not applicable for this transceiver type)".format(i)) continue @@ -478,9 +490,8 @@ def test_lpmode(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, pla logger.warning("test_lpmode: Skipping transceiver {} (not supported on this platform)".format(i)) break self.expect(ret is True, "Failed to {} low-power mode for transceiver {}".format("enable" if state is True else "disable", i)) - lpmode = sfp.get_lpmode(platform_api_conn, i) - if self.expect(lpmode is not None, "Unable to retrieve transceiver {} low-power mode".format(i)): - self.expect(lpmode == state, "Transceiver {} low-power is incorrect".format(i)) + self.expect(wait_until(5, 1, self._check_lpmode_status, sfp, platform_api_conn, i, state), + "Transceiver {} expected low-power state {} is not aligned with the real state".format(i, "enable" if state is True else "disable")) self.assert_expectations() def test_power_override(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): From 061acc151080f59126c352b3ffafc8f43aa78e5f Mon Sep 17 00:00:00 2001 From: lguohan Date: Thu, 5 Aug 2021 08:27:07 -0700 Subject: [PATCH 055/117] [python3]: make modules python2/python3 compatible (#3948) Signed-off-by: Guohan Lu --- ansible/library/fabric_info.py | 4 ++-- ansible/library/port_alias.py | 18 +++++++++--------- ansible/library/test_facts.py | 6 +++++- ansible/library/topo_facts.py | 30 ++++++++++++++++++++---------- 4 files changed, 36 insertions(+), 22 deletions(-) diff --git a/ansible/library/fabric_info.py 
b/ansible/library/fabric_info.py index 4c2a35113b6..1b14caed4fc 100644 --- a/ansible/library/fabric_info.py +++ b/ansible/library/fabric_info.py @@ -59,10 +59,10 @@ def main(): 'ip6_prefix': next_v6addr + "/" + v6pfx[-1] } fabric_info.append( data ) module.exit_json(ansible_facts={'fabric_info': fabric_info}) - except (IOError, OSError), e: + except (IOError, OSError) as e: fail_msg = "IO error" + str(e) module.fail_json(msg=fail_msg) - except Exception, e: + except Exception as e: fail_msg = "failed to find the correct fabric asic info " + str(e) module.fail_json(msg=fail_msg) diff --git a/ansible/library/port_alias.py b/ansible/library/port_alias.py index 6fc7699ccb4..656c4c9ce04 100755 --- a/ansible/library/port_alias.py +++ b/ansible/library/port_alias.py @@ -9,9 +9,9 @@ from collections import defaultdict try: - from sonic_py_common import multi_asic + from sonic_py_common import multi_asic except ImportError: - print("Failed to import multi_asic") + print("Failed to import multi_asic") DOCUMENTATION = ''' module: port_alias.py @@ -23,7 +23,7 @@ The definition of this mapping is specified in http://github.com/azure/sonic-buildimage/device You should build docker-sonic-mgmt from sonic-buildimage and run Ansible from sonic-mgmt docker container For multi-asic platforms, port_config.ini for each asic will be parsed to get the port_alias information. - When bringing up the testbed, port-alias will only contain external interfaces, so that vs image can come up with + When bringing up the testbed, port-alias will only contain external interfaces, so that vs image can come up with external interfaces. 
Input: hwsku num_asic @@ -220,7 +220,7 @@ def main(): aliasmap = {} portspeed = {} sysports = [] - # ASIC interface names of front panel interfaces + # ASIC interface names of front panel interfaces front_panel_asic_ifnames = [] # { asic_name: [ asic interfaces] } asic_if_names = {} @@ -238,16 +238,16 @@ def main(): start_switchid = 0 if 'start_switchid' in m_args and m_args['start_switchid'] != None: start_switchid = int(m_args['start_switchid']) - # When this script is invoked on sonic-mgmt docker, num_asic + # When this script is invoked on sonic-mgmt docker, num_asic # parameter is passed. if m_args['num_asic'] is not None: num_asic = m_args['num_asic'] else: # When this script is run on the device, num_asic parameter # is not passed. - try: + try: num_asic = multi_asic.get_num_asics() - except Exception, e: + except Exception as e: num_asic = 1 switchid = 0 @@ -286,10 +286,10 @@ def main(): 'front_panel_asic_ifnames': front_panel_asic_ifnames, 'asic_if_names': asic_if_names, 'sysports': sysports}) - except (IOError, OSError), e: + except (IOError, OSError) as e: fail_msg = "IO error" + str(e) module.fail_json(msg=fail_msg) - except Exception, e: + except Exception as e: fail_msg = "failed to find the correct port config for "+m_args['hwsku'] + str(e) module.fail_json(msg=fail_msg) diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py index e2c7ddf26a9..2087d2d2ae3 100644 --- a/ansible/library/test_facts.py +++ b/ansible/library/test_facts.py @@ -1,5 +1,6 @@ #!/usr/bin/env python import os +import sys import traceback import ipaddr as ipaddress import csv @@ -144,7 +145,10 @@ def _read_testbed_topo_from_csv(): line['ptf_ipv6'], line['ptf_netmask_v6'] = \ _cidr_to_ip_mask(line["ptf_ipv6"]) - line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + if sys.version_info < (3, 0): + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + else: + line['duts'] = 
line['dut'].translate(str.maketrans("", "", "[] ")).split(';') line['duts_map'] = {dut: line['duts'].index(dut) for dut in line['duts']} del line['dut'] diff --git a/ansible/library/topo_facts.py b/ansible/library/topo_facts.py index c1a0aec19af..e2b166fb9b0 100644 --- a/ansible/library/topo_facts.py +++ b/ansible/library/topo_facts.py @@ -2,6 +2,7 @@ import os import traceback import ipaddress +import sys import csv from operator import itemgetter from itertools import groupby @@ -104,7 +105,7 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs dut_index = 0 for asic_intf in topo_definition['topology'][neigh_type][vm]['asic_intfs']: vmconfig[vm]['asic_intfs'][dut_index].append(asic_intf) - + # physical interface for intf in topo_definition['configuration'][vm]['interfaces']: if (neigh_type == 'VMs' and 'Ethernet' in intf) or \ @@ -114,17 +115,17 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index'] if 'lacp' in topo_definition['configuration'][vm]['interfaces'][intf]: po_map[topo_definition['configuration'][vm]['interfaces'][intf]['lacp']] = dut_index - + vmconfig[vm]['intfs'][dut_index].append(intf) - + # ip interface vmconfig[vm]['ip_intf'] = [None] * dut_num vmconfig[vm]['peer_ipv4'] = [None] * dut_num vmconfig[vm]['ipv4mask'] = [None] * dut_num vmconfig[vm]['peer_ipv6'] = [None] * dut_num vmconfig[vm]['ipv6mask'] = [None] * dut_num - - + + for intf in topo_definition['configuration'][vm]['interfaces']: dut_index = 0 if (neigh_type == 'VMs' and 'Ethernet' in intf) or \ @@ -134,7 +135,7 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs elif 'Port-Channel' in intf: m = re.search("(\d+)", intf) dut_index = po_map[int(m.group(1))] - + if 'ipv4' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()): (peer_ipv4, ipv4_mask) = 
topo_definition['configuration'][vm]['interfaces'][intf]['ipv4'].split('/') vmconfig[vm]['peer_ipv4'][dut_index] = peer_ipv4 @@ -145,26 +146,35 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs vmconfig[vm]['peer_ipv6'][dut_index] = ipv6_addr.upper() vmconfig[vm]['ipv6mask'][dut_index] = ipv6_mask vmconfig[vm]['ip_intf'][dut_index] = intf - + # bgp vmconfig[vm]['bgp_ipv4'] = [None] * dut_num vmconfig[vm]['bgp_ipv6'] = [None] * dut_num vmconfig[vm]['bgp_asn'] = topo_definition['configuration'][vm]['bgp']['asn'] for ipstr in topo_definition['configuration'][vm]['bgp']['peers'][dut_asn]: - ip = ipaddress.ip_address(ipstr.decode('utf8')) + if sys.version_info < (3, 0): + ip = ipaddress.ip_address(ipstr.decode('utf8')) + else: + ip = ipaddress.ip_address(ipstr) for dut_index in range(0, dut_num): if ip.version == 4: # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index if vmconfig[vm]['peer_ipv4'][dut_index]: ipsubnet_str = vmconfig[vm]['peer_ipv4'][dut_index]+'/'+vmconfig[vm]['ipv4mask'][dut_index] - ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + if sys.version_info < (3, 0): + ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + else: + ipsubnet = ipaddress.ip_interface(ipsubnet_str) if ip in ipsubnet.network: vmconfig[vm]['bgp_ipv4'][dut_index] = ipstr.upper() elif ip.version == 6: # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index if vmconfig[vm]['peer_ipv6'][dut_index]: ipsubnet_str = vmconfig[vm]['peer_ipv6'][dut_index]+'/'+vmconfig[vm]['ipv6mask'][dut_index] - ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + if sys.version_info < (3, 0): + ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + else: + ipsubnet = ipaddress.ip_interface(ipsubnet_str) if ip in ipsubnet.network: vmconfig[vm]['bgp_ipv6'][dut_index] = ipstr.upper() return vmconfig From 
5074a3af9397882ec0ea2f7cb2482e5421276708 Mon Sep 17 00:00:00 2001 From: vmittal-msft <46945843+vmittal-msft@users.noreply.github.com> Date: Thu, 5 Aug 2021 11:05:19 -0700 Subject: [PATCH 056/117] QoS test cases fixes for TD3 based systems (#3946) --- tests/qos/files/qos.yml | 30 ++++++++++++++++-------------- tests/qos/qos_sai_base.py | 13 ++++++++++++- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/tests/qos/files/qos.yml b/tests/qos/files/qos.yml index 20915a09e94..84ebf6bb652 100644 --- a/tests/qos/files/qos.yml +++ b/tests/qos/files/qos.yml @@ -427,19 +427,21 @@ qos_params: td3: topo-any: 50000_300m: - pkts_num_leak_out: 22 + pkts_num_leak_out: 32 xoff_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 13021 - pkts_num_trig_ingr_drp: 13261 + pkts_num_trig_pfc: 13012 + pkts_num_trig_ingr_drp: 13252 + pkts_num_margin: 4 xoff_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 13021 - pkts_num_trig_ingr_drp: 13261 + pkts_num_trig_pfc: 13012 + pkts_num_trig_ingr_drp: 13252 + pkts_num_margin: 4 hdrm_pool_size: dscps: [3, 4] ecn: 1 @@ -454,21 +456,21 @@ qos_params: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 13021 - pkts_num_trig_ingr_drp: 13261 + pkts_num_trig_pfc: 13012 + pkts_num_trig_ingr_drp: 13252 cell_size: 256 pkts_num_margin: 2 xon_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 13021 + pkts_num_trig_pfc: 13012 pkts_num_dismiss_pfc: 18 xon_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 13021 + pkts_num_trig_pfc: 13012 pkts_num_dismiss_pfc: 18 lossy_queue_1: dscp: 8 @@ -480,7 +482,7 @@ qos_params: ecn: 1 pg: 3 pkts_num_fill_min: 18 - pkts_num_trig_pfc: 13021 + pkts_num_trig_pfc: 13012 packet_size: 64 cell_size: 256 wm_pg_shared_lossy: @@ -496,7 +498,7 @@ qos_params: ecn: 1 queue: 3 pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 13261 + pkts_num_trig_ingr_drp: 13252 cell_size: 256 wm_buf_pool_lossless: dscp: 3 @@ -504,8 +506,8 @@ qos_params: pg: 3 queue: 3 pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 13021 - pkts_num_trig_ingr_drp: 13261 + pkts_num_trig_pfc: 
13012 + pkts_num_trig_ingr_drp: 13252 pkts_num_fill_egr_min: 8 cell_size: 256 wm_q_shared_lossy: @@ -525,7 +527,7 @@ qos_params: pkts_num_fill_egr_min: 14 cell_size: 256 100000_300m: - pkts_num_leak_out: 41 + pkts_num_leak_out: 32 xoff_1: dscp: 3 ecn: 1 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index b3dbaf84fbd..ca04f6440eb 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -590,7 +590,8 @@ def updateIptables( @pytest.fixture(scope='class') def stopServices( self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index, - swapSyncd, enable_container_autorestart, disable_container_autorestart + swapSyncd, enable_container_autorestart, disable_container_autorestart, + tbinfo ): """ Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start @@ -638,12 +639,22 @@ def updateDockerService(host, docker="", action="", service=""): for service in services: updateDockerService(duthost, action="stop", **service) + """ Disable linkmgr """ + if 'dualtor' in tbinfo['topo']['name']: + duthost.shell('sudo config feature state mux disabled') + logger.info("Disable linkmgr for dual ToR testbed") + yield enable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list) for service in services: updateDockerService(duthost, action="start", **service) + """ Enable linkmgr """ + if 'dualtor' in tbinfo['topo']['name']: + duthost.shell('sudo config feature state mux enabled') + logger.info("Enable linkmgr for dual ToR testbed") + @pytest.fixture(autouse=True) def updateLoganalyzerExceptions(self, rand_one_dut_hostname, loganalyzer): """ From bac8b9bf7c51008ceab75e83ce68fa9473a7d2ec Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Thu, 5 Aug 2021 16:24:20 -0700 Subject: [PATCH 057/117] [sfp test option] move option --skip-absent-sfp definition to conftest.py (#3973) What is the motivation for this PR? Nightly test will fail if option --skip-absent-sfp is used when no target test script is specified. 
Which is usually how we run the nightly tests. How did you do it? Move the option definition to conftest.py How did you verify/test it? Kick off a nightly test with the change. Signed-off-by: Ying Xie ying.xie@microsoft.com --- tests/conftest.py | 10 ++++++++++ tests/platform_tests/args/api_sfp_args.py | 15 --------------- tests/platform_tests/conftest.py | 2 -- 3 files changed, 10 insertions(+), 17 deletions(-) delete mode 100644 tests/platform_tests/args/api_sfp_args.py diff --git a/tests/conftest.py b/tests/conftest.py index 43e05d14f87..dfef588440c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -120,6 +120,16 @@ def pytest_addoption(parser): ############################ parser.addoption("--testnum", action="store", default=None, type=str) + ############################ + # platform sfp api options # + ############################ + + # Allow user to skip the absent sfp modules. User can use it like below: + # "--skip-absent-sfp=True" + # If this option is not specified, False will be used by default. + parser.addoption("--skip-absent-sfp", action="store", type=bool, default=False, + help="Skip test on absent SFP", + ) @pytest.fixture(scope="session", autouse=True) def enhance_inventory(request): diff --git a/tests/platform_tests/args/api_sfp_args.py b/tests/platform_tests/args/api_sfp_args.py deleted file mode 100644 index 2a0a7da228d..00000000000 --- a/tests/platform_tests/args/api_sfp_args.py +++ /dev/null @@ -1,15 +0,0 @@ -def add_api_sfp_args(parser): - ############################ - # platform sfp api options # - ############################ - - # Allow user to skip the absent sfp modules. User can use it like below: - # "--skip-absent-sfp=True" - # If this option is not specified, False will be used by default. 
- parser.addoption( - "--skip-absent-sfp", - action="store", - type=bool, - default=False, - help="Skip test on absent SFP", - ) diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index e622a9d03ba..8ef4c20a75e 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -14,7 +14,6 @@ from .args.advanced_reboot_args import add_advanced_reboot_args from .args.cont_warm_reboot_args import add_cont_warm_reboot_args from .args.normal_reboot_args import add_normal_reboot_args -from .args.api_sfp_args import add_api_sfp_args TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates") FMT = "%b %d %H:%M:%S.%f" @@ -339,7 +338,6 @@ def pytest_addoption(parser): add_advanced_reboot_args(parser) add_cont_warm_reboot_args(parser) add_normal_reboot_args(parser) - add_api_sfp_args(parser) def pytest_generate_tests(metafunc): From ea0d0c6497db6e6f4893d0c1137f3e459c69491d Mon Sep 17 00:00:00 2001 From: Blueve <672454911@qq.com> Date: Fri, 6 Aug 2021 13:04:14 +0800 Subject: [PATCH 058/117] [tests/run_tests] Support deselect specific test case (#3949) Signed-off-by: Jing Kan jika@microsoft.com --- tests/run_tests.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/run_tests.sh b/tests/run_tests.sh index d2cd3f2a2aa..ab8ebe9c502 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -157,7 +157,11 @@ function setup_test_options() fi for skip in ${SKIP_SCRIPTS} ${SKIP_FOLDERS}; do - PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --ignore=${skip}" + if [[ $skip == *"::"* ]]; then + PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --deselect=${skip}" + else + PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --ignore=${skip}" + fi done if [[ -d ${LOG_PATH} ]]; then From 4455edd2e96612cd972c57affff8e258f0fd53e9 Mon Sep 17 00:00:00 2001 From: roman_savchuk Date: Fri, 6 Aug 2021 21:25:24 +0300 Subject: [PATCH 059/117] [ test_pcied ] Fixed redis status table key value (#3795) Signed-off-by: 
Roman Savchuk romanx.savchuk@intel.com Description of PR Summary: Fixes # (issue) Type of change Bug fix Testbed and Framework(new/improvement) Test case(new/improvement) Back port request 201911 Approach What is the motivation for this PR? Test test_pmon_pcied_running_status failed as daemon_db_value return empty string How did you do it? Changed pcie_devices_status_tbl_key = "PCIE_DEVICES|status" value from "PCIE_DEVICES|status" to 'PCIE_DEVICES` How did you verify/test it? Run test, test_pmon_pcied_running_status passed Any platform specific information? `SONiC Software Version: SONiC.master.15330-dirty-20210516.082409 Distribution: Debian 10.9 Kernel: 4.19.0-12-2-amd64 Build commit: ea803257 Build date: Sun May 16 14:02:33 UTC 2021 Built by: AzDevOps@sonic-build-workers-0009PE Platform: x86_64-arista_7170_64c HwSKU: Arista-7170-64C ASIC: barefoot ASIC Count: 1` --- tests/platform_tests/daemon/test_pcied.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/platform_tests/daemon/test_pcied.py b/tests/platform_tests/daemon/test_pcied.py index 9b79b23e683..401241b3782 100644 --- a/tests/platform_tests/daemon/test_pcied.py +++ b/tests/platform_tests/daemon/test_pcied.py @@ -37,7 +37,6 @@ SIG_TERM = "-15" SIG_KILL = "-9" -pcie_devices_status_tbl_key = "PCIE_DEVICES|status" status_field = "status" expected_pcied_devices_status = "PASSED" @@ -102,7 +101,13 @@ def check_daemon_status(duthosts, rand_one_dut_hostname): duthost.start_pmon_daemon(daemon_name) time.sleep(10) -def test_pmon_pcied_running_status(duthosts, rand_one_dut_hostname): +@pytest.fixture(scope="module", autouse=True) +def get_pcie_devices_tbl_key(duthosts,rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + command_output = duthost.shell("redis-cli -n 6 keys '*' | grep PCIE_DEVICES") + return command_output["stdout"] + +def test_pmon_pcied_running_status(duthosts, rand_one_dut_hostname, get_pcie_devices_tbl_key): """ @summary: This test case is to 
check pcied status on dut """ @@ -114,9 +119,9 @@ def test_pmon_pcied_running_status(duthosts, rand_one_dut_hostname): pytest_assert(daemon_pid != -1, "Pcied expected pid is a positive integer but is {}".format(daemon_pid)) - daemon_db_value = duthost.get_pmon_daemon_db_value(pcie_devices_status_tbl_key, status_field) + daemon_db_value = duthost.get_pmon_daemon_db_value(get_pcie_devices_tbl_key, status_field) pytest_assert(daemon_db_value == expected_pcied_devices_status, - "Expected {} {} is {} but is {}".format(pcie_devices_status_tbl_key, status_field, expected_pcied_devices_status, daemon_db_value)) + "Expected {} {} is {} but is {}".format(get_pcie_devices_tbl_key, status_field, expected_pcied_devices_status, daemon_db_value)) From 928100db6702f07d9cf40d786ffd243ffb1ede4b Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Sat, 7 Aug 2021 02:49:36 +0800 Subject: [PATCH 060/117] [bgp] Improve test_bgp_gr_helper_routes_perserved (#3942) Fixes #3838 - test_bgp_gr_helper_routes_perserved needs to verify the default route presence in fixture setup_bgp_graceful_restart that would error the testcase for storage backend topologies. So let's skip the check and let the test skip on storage backend topologies. - test_bgp_gr_helper tries to verify that the default route is present before the test run and verify that it is preserved during the BGP restart. - But there is a static default route added by FRR and the check_default_route call still returns True. So add this PR to improve by adding new method get_bgp_route_info and check_bgp_route_info to get and verify only BGP route information. Signed-off-by: Longxiang Lyu How did you do it? only check default route for non-storage-backend topology testbeds. get BGP only routes and verify BGP default route. How did you verify/test it? 
bgp/test_bgp_gr_helper.py::test_bgp_gr_helper_routes_perserved SKIPPED bgp/test_bgp_gr_helper.py::test_bgp_gr_helper_all_routes_preserved PASSED --- tests/bgp/conftest.py | 7 ++++--- tests/bgp/test_bgp_gr_helper.py | 7 +++---- tests/common/devices/multi_asic.py | 31 ++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py index 08da1fec3bc..3527bcf6ed1 100644 --- a/tests/bgp/conftest.py +++ b/tests/bgp/conftest.py @@ -62,7 +62,7 @@ def check_results(results): @pytest.fixture(scope='module') -def setup_bgp_graceful_restart(duthosts, rand_one_dut_hostname, nbrhosts): +def setup_bgp_graceful_restart(duthosts, rand_one_dut_hostname, nbrhosts, tbinfo): duthost = duthosts[rand_one_dut_hostname] config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] @@ -141,9 +141,10 @@ def restore_nbr_gr(node=None, results=None): res = False err_msg = "not all bgp sessions are up after enable graceful restart" - if res and not wait_until(100, 5, duthost.check_default_route): + is_backend_topo = "backend" in tbinfo["topo"]["name"] + if not is_backend_topo and res and not wait_until(100, 5, duthost.check_bgp_default_route): res = False - err_msg = "ipv4 or ipv6 default route not available" + err_msg = "ipv4 or ipv6 bgp default route not available" if not res: # Disable graceful restart in case of failure diff --git a/tests/bgp/test_bgp_gr_helper.py b/tests/bgp/test_bgp_gr_helper.py index eb56e6882e6..4399fa53f12 100644 --- a/tests/bgp/test_bgp_gr_helper.py +++ b/tests/bgp/test_bgp_gr_helper.py @@ -17,18 +17,17 @@ def test_bgp_gr_helper_routes_perserved(duthosts, rand_one_dut_hostname, nbrhost """ duthost = duthosts[rand_one_dut_hostname] + if not duthost.check_bgp_default_route(): + pytest.skip("there is no nexthop for bgp default route") + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] bgp_neighbors = 
config_facts.get('BGP_NEIGHBOR', {}) po = config_facts.get('PORTCHANNEL', {}) dev_nbr = config_facts.get('DEVICE_NEIGHBOR', {}) rtinfo_v4 = duthost.get_ip_route_info(ipaddress.ip_network(u'0.0.0.0/0')) - if len(rtinfo_v4['nexthops']) == 0: - pytest.skip("there is no next hop for v4 default route") rtinfo_v6 = duthost.get_ip_route_info(ipaddress.ip_network(u'::/0')) - if len(rtinfo_v6['nexthops']) == 0: - pytest.skip("there is no next hop for v6 default route") ifnames_v4 = [nh[1] for nh in rtinfo_v4['nexthops']] ifnames_v6 = [nh[1] for nh in rtinfo_v6['nexthops']] diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index 155e80403a8..8a6ea9550c7 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -427,3 +427,34 @@ def check_bgp_session_state(self, neigh_ips, state="established"): return True return False + + def get_bgp_route_info(self, prefix, ns=None): + """ + @summary: return BGP routes information. + + @param prefix: IP prefix + @param ns: network namespace + """ + prefix = ipaddress.ip_network(unicode(str(prefix))) + if isinstance(prefix, ipaddress.IPv4Network): + check_cmd = "vtysh -c 'show bgp ipv4 %s json'" + else: + check_cmd = "vtysh -c 'show bgp ipv6 %s json'" + check_cmd %= prefix + if ns is not None: + check_cmd = self.get_vtysh_cmd_for_namespace(check_cmd, ns) + return json.loads(self.shell(check_cmd, verbose=False)['stdout']) + + + def check_bgp_default_route(self, ipv4=True, ipv6=True): + """ + @summary: check if bgp default route is present. 
+ + @param ipv4: check ipv4 default + @param ipv6: check ipv6 default + """ + if ipv4 and len(self.get_bgp_route_info("0.0.0.0/0")) == 0: + return False + if ipv6 and len(self.get_bgp_route_info("::/0")) == 0: + return False + return True From 9516220822172223a42d0c3c9b039ee13714e4d0 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Fri, 6 Aug 2021 14:57:13 -0700 Subject: [PATCH 061/117] Make some Ansible scripts support both Python 2 and 3 (#3978) Make some Ansible scripts support both Python 2 and 3 Signed-off-by: Saikrishna Arcot --- ansible/library/bgp_facts.py | 4 +- ansible/library/bgp_route.py | 2 +- ansible/library/config_facts.py | 6 +- ansible/library/conn_graph_facts.py | 2 +- ansible/library/extract_log.py | 9 +- ansible/library/interface_facts.py | 2 - ansible/library/minigraph_facts.py | 21 ++--- .../files/tools/loganalyzer/loganalyzer.py | 90 +++++++++---------- 8 files changed, 70 insertions(+), 66 deletions(-) diff --git a/ansible/library/bgp_facts.py b/ansible/library/bgp_facts.py index 5d757ac63e4..f6ec49dde06 100644 --- a/ansible/library/bgp_facts.py +++ b/ansible/library/bgp_facts.py @@ -179,7 +179,7 @@ def parse_neighbors(self): value_dict['rcvd'] = int(rcvd) message_stats[key] = value_dict except Exception as e: - print"NonFatal: line:'{}' should not have matched for sent/rcvd count".format(line) + print("NonFatal: line:'{}' should not have matched for sent/rcvd count".format(line)) if capabilities: neighbor['capabilities'] = capabilities @@ -205,7 +205,7 @@ def get_statistics(self): statistics['ipv6_admin_down'] = 0 statistics['ipv6_idle'] = 0 - for neighbor in self.facts['bgp_neighbors'].itervalues(): + for neighbor in self.facts['bgp_neighbors'].values(): if neighbor['ip_version'] == 4: statistics['ipv4'] += 1 if neighbor['admin'] == 'down': diff --git a/ansible/library/bgp_route.py b/ansible/library/bgp_route.py index 7cd8d40abac..1897e9d94ff 100644 --- a/ansible/library/bgp_route.py +++ b/ansible/library/bgp_route.py @@ -228,7 +228,7 
@@ def parse_bgp_route_prefix_json(self, cmd_result): p = json.loads(cmd_result) - if not p.has_key('prefix'): + if 'prefix' not in p: self.facts['bgp_route'][prefix]['found'] = False return diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py index c80eae2e809..568ccf38862 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -74,7 +74,7 @@ def create_maps(config): #get the port_index from config_db if available port_index_map = { name: int(v['index']) - 1 - for name, v in config['PORT'].iteritems() + for name, v in config['PORT'].items() if 'index' in v } if not port_index_map: @@ -82,10 +82,10 @@ def create_maps(config): for idx, val in enumerate(port_name_list_sorted): port_index_map[val] = idx - port_name_to_alias_map = { name : v['alias'] if 'alias' in v else '' for name, v in config["PORT"].iteritems()} + port_name_to_alias_map = { name : v['alias'] if 'alias' in v else '' for name, v in config["PORT"].items()} # Create inverse mapping between port name and alias - port_alias_to_name_map = {v: k for k, v in port_name_to_alias_map.iteritems()} + port_alias_to_name_map = {v: k for k, v in port_name_to_alias_map.items()} return { 'port_name_to_alias_map' : port_name_to_alias_map, diff --git a/ansible/library/conn_graph_facts.py b/ansible/library/conn_graph_facts.py index fc9cbec72f8..0ffc37d5e45 100755 --- a/ansible/library/conn_graph_facts.py +++ b/ansible/library/conn_graph_facts.py @@ -142,7 +142,7 @@ def port_vlanlist(self, vlanrange): continue elif '-' in vlanid: vlanlist = list(map(str.strip, vlanid.split('-'))) - vlans.extend(range(int(vlanlist[0]), int(vlanlist[1])+1)) + vlans.extend(list(range(int(vlanlist[0]), int(vlanlist[1])+1))) continue elif vlanid != '': raise ValueError('vlan range error "%s"' % vlanrange) diff --git a/ansible/library/extract_log.py b/ansible/library/extract_log.py index b80af9e4a6d..4064478b68c 100644 --- a/ansible/library/extract_log.py +++ 
b/ansible/library/extract_log.py @@ -78,6 +78,7 @@ import re import sys from datetime import datetime +from functools import cmp_to_key from ansible.module_utils.basic import * @@ -176,8 +177,12 @@ def list_files(directory, prefixname): of files in @directory starting with @prefixname (Comparator used is @filename_comparator)""" - return sorted([filename for filename in os.listdir(directory) - if filename.startswith(prefixname)], cmp=filename_comparator) + if sys.version_info < (3, 0): + return sorted([filename for filename in os.listdir(directory) + if filename.startswith(prefixname)], cmp=filename_comparator) + else: + return sorted([filename for filename in os.listdir(directory) + if filename.startswith(prefixname)], key=cmp_to_key(filename_comparator)) def extract_latest_line_with_string(directory, filenames, start_string): diff --git a/ansible/library/interface_facts.py b/ansible/library/interface_facts.py index b5863a0525e..654bc6c9b4a 100644 --- a/ansible/library/interface_facts.py +++ b/ansible/library/interface_facts.py @@ -12,8 +12,6 @@ import datetime import getpass import pwd -import ConfigParser -import StringIO import json from ansible.module_utils.basic import * diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py index f7dee8bf3ca..c931471a5cf 100644 --- a/ansible/library/minigraph_facts.py +++ b/ansible/library/minigraph_facts.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import print_function import calendar import os import sys @@ -190,14 +191,14 @@ def parse_png(png, hname): startport = link.find(str(QName(ns, "StartPort"))).text if enddevice == hname: - if port_alias_to_name_map.has_key(endport): + if endport in port_alias_to_name_map: endport = port_alias_to_name_map[endport] if startdevice.lower() in namespace_list: neighbors_namespace[endport] = startdevice.lower() else: neighbors[endport] = {'name': startdevice, 'port': startport, 'namespace':''} elif startdevice == hname: - if 
port_alias_to_name_map.has_key(startport): + if startport in port_alias_to_name_map: startport = port_alias_to_name_map[startport] if enddevice.lower() in namespace_list: neighbors_namespace[startport] = enddevice.lower() @@ -249,7 +250,7 @@ def parse_png(png, hname): elif node.tag == str(QName(ns, "EndDevice")): mgmt_dev = node.text - for k, v in neighbors.iteritems(): + for k, v in neighbors.items(): v['namespace'] = neighbors_namespace[k] return (neighbors, devices, console_dev, console_port, mgmt_dev, mgmt_port) @@ -419,11 +420,11 @@ def _parse_intf(intfname, ipprefix): acl_intfs = [] for member in aclattach: member = member.strip() - if pcs.has_key(member): + if member in pcs: acl_intfs.extend(pcs[member]['members']) # For ACL attaching to port channels, we break them into port channel members - elif vlans.has_key(member): - print >> sys.stderr, "Warning: ACL " + aclname + " is attached to a Vlan interface, which is currently not supported" - elif port_alias_to_name_map.has_key(member): + elif member in vlans: + print("Warning: ACL " + aclname + " is attached to a Vlan interface, which is currently not supported", file=sys.stderr) + elif member in port_alias_to_name_map: acl_intfs.append(port_alias_to_name_map[member]) if acl_intfs: acls[aclname] = acl_intfs @@ -635,7 +636,7 @@ def parse_xml(filename, hostname, asic_name=None): port_alias_to_name_map, port_alias_asic_map = get_port_alias_to_name_map(hwsku, asic_id) # Create inverse mapping between port name and alias - port_name_to_alias_map = {v: k for k, v in port_alias_to_name_map.iteritems()} + port_name_to_alias_map = {v: k for k, v in port_alias_to_name_map.items()} for child in root: if asic_name is None: @@ -663,10 +664,10 @@ def parse_xml(filename, hostname, asic_name=None): # Associate Port Channel to namespace try: - for pckey, pcval in pcs.iteritems(): + for pckey, pcval in pcs.items(): pcval['namespace'] = neighbors[pcval['members'][0]]['namespace'] except Exception as e: - print >> sys.stderr, 
"Warning: PortChannel " + pckey + " has no member ports." + print("Warning: PortChannel " + pckey + " has no member ports.", file=sys.stderr) # TODO: Move all alias-related code out of minigraph_facts.py and into # its own module to be used as another layer after parsing the minigraph. diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py index 105435caad0..a7b99905ea0 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py @@ -16,6 +16,7 @@ #--------------------------------------------------------------------- # Global imports #--------------------------------------------------------------------- +from __future__ import print_function import sys import getopt import re @@ -24,7 +25,6 @@ import logging import logging.handlers from datetime import datetime -from __builtin__ import True #--------------------------------------------------------------------- # Global variables @@ -90,7 +90,7 @@ def print_diagnostic_message(self, message): if (not self.verbose): return - print '[LogAnalyzer][diagnostic]:%s' % message + print('[LogAnalyzer][diagnostic]:%s' % message) #--------------------------------------------------------------------- def create_start_marker(self): @@ -178,7 +178,7 @@ def error_to_regx(self, error_string): ''' #-- Check if error_string is a string or a list --# - if (isinstance(error_string, basestring)): + if (isinstance(error_string, str)): original_string = error_string #-- Escapes out of all the meta characters --# error_string = re.escape(error_string) @@ -248,8 +248,8 @@ def create_msg_regex(self, file_lsit): messages_regex.append(self.error_to_regx(row[1:])) except Exception as e: - print 'ERROR: line %d is formatted incorrectly in file %s. Skipping line' % (index, filename) - print repr(e) + print('ERROR: line %d is formatted incorrectly in file %s. 
Skipping line' % (index, filename)) + print(repr(e)) sys.exit(err_invalid_string_format) if (len(messages_regex)): @@ -351,7 +351,7 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege if rev_line.find(end_marker) != -1: self.print_diagnostic_message('found end marker: %s' % end_marker) if (found_end_marker): - print 'ERROR: duplicate end marker found' + print('ERROR: duplicate end marker found') sys.exit(err_duplicate_end_marker) found_end_marker = True in_analysis_range = True @@ -361,12 +361,12 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege if rev_line.find(start_marker) != -1 and 'nsible' not in rev_line: self.print_diagnostic_message('found start marker: %s' % start_marker) if (found_start_marker): - print 'ERROR: duplicate start marker found' + print('ERROR: duplicate start marker found') sys.exit(err_duplicate_start_marker) found_start_marker = True if(not in_analysis_range): - print 'ERROR: found start marker:%s without corresponding end marker' % rev_line + print('ERROR: found start marker:%s without corresponding end marker' % rev_line) sys.exit(err_no_end_marker) in_analysis_range = False break @@ -385,11 +385,11 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege # care about the markers only if input is not stdin or no need to check start marker if not stdin_as_input and check_marker: if (not found_start_marker): - print 'ERROR: start marker was not found' + print('ERROR: start marker was not found') sys.exit(err_no_start_marker) if (not found_end_marker): - print 'ERROR: end marker was not found' + print('ERROR: end marker was not found') sys.exit(err_no_end_marker) return matching_lines, expected_lines @@ -428,32 +428,32 @@ def analyze_file_list(self, log_file_list, match_messages_regex, ignore_messages #--------------------------------------------------------------------- def usage(): - print 'loganalyzer input parameters:' - print '--help Print 
usage' - print '--verbose Print verbose output during the run' - print '--action init|analyze - action to perform.' - print ' init - initialize analysis by placing start-marker' - print ' to all log files specified in --logs parameter.' - print ' analyze - perform log analysis of files specified in --logs parameter.' - print ' add_end_marker - add end marker to all log files specified in --logs parameter.' - print '--out_dir path Directory path where to place output files, ' - print ' must be present when --action == analyze' - print '--logs path{,path} List of full paths to log files to be analyzed.' - print ' Implicitly system log file will be also processed' - print '--run_id string String passed to loganalyzer, uniquely identifying ' - print ' analysis session. Used to construct start/end markers. ' - print '--match_files_in path{,path} List of paths to files containing strings. A string from log file' - print ' By default syslog will be always analyzed and should be passed by match_files_in.' - print ' matching any string from match_files_in will be collected and ' - print ' reported. Must be present when action == analyze' - print '--ignore_files_in path{,path} List of paths to files containing string. ' - print ' A string from log file matching any string from these' - print ' files will be ignored during analysis. Must be present' - print ' when action == analyze.' - print '--expect_files_in path{,path} List of path to files containing string. ' - print ' All the strings from these files will be expected to present' - print ' in one of specified log files during the analysis. Must be present' - print ' when action == analyze.' 
+ print('loganalyzer input parameters:') + print('--help Print usage') + print('--verbose Print verbose output during the run') + print('--action init|analyze - action to perform.') + print(' init - initialize analysis by placing start-marker') + print(' to all log files specified in --logs parameter.') + print(' analyze - perform log analysis of files specified in --logs parameter.') + print(' add_end_marker - add end marker to all log files specified in --logs parameter.') + print('--out_dir path Directory path where to place output files, ') + print(' must be present when --action == analyze') + print('--logs path{,path} List of full paths to log files to be analyzed.') + print(' Implicitly system log file will be also processed') + print('--run_id string String passed to loganalyzer, uniquely identifying ') + print(' analysis session. Used to construct start/end markers. ') + print('--match_files_in path{,path} List of paths to files containing strings. A string from log file') + print(' By default syslog will be always analyzed and should be passed by match_files_in.') + print(' matching any string from match_files_in will be collected and ') + print(' reported. Must be present when action == analyze') + print('--ignore_files_in path{,path} List of paths to files containing string. ') + print(' A string from log file matching any string from these') + print(' files will be ignored during analysis. Must be present') + print(' when action == analyze.') + print('--expect_files_in path{,path} List of path to files containing string. ') + print(' All the strings from these files will be expected to present') + print(' in one of specified log files during the analysis. 
Must be present') + print(' when action == analyze.') #--------------------------------------------------------------------- @@ -473,17 +473,17 @@ def check_action(action, log_files_in, out_dir, match_files_in, ignore_files_in, ret_code = True elif (action == 'analyze'): if out_dir is None or len(out_dir) == 0: - print 'ERROR: missing required out_dir for analyze action' + print('ERROR: missing required out_dir for analyze action') ret_code = False elif match_files_in is None or len(match_files_in) == 0: - print 'ERROR: missing required match_files_in for analyze action' + print('ERROR: missing required match_files_in for analyze action') ret_code = False else: ret_code = False - print 'ERROR: invalid action:%s specified' % action + print('ERROR: invalid action:%s specified' % action) return ret_code #--------------------------------------------------------------------- @@ -500,7 +500,7 @@ def check_run_id(run_id): ret_code = True if ((run_id is None) or (len(run_id) == 0)): - print 'ERROR: no run_id specified' + print('ERROR: no run_id specified') ret_code = False return ret_code @@ -524,7 +524,7 @@ def write_result_file(run_id, out_dir, analysis_result_per_file, messages_regex_ expected_lines_total = [] with open(out_dir + "/result.loganalysis." 
+ run_id + ".log", 'w') as out_file: - for key, val in analysis_result_per_file.iteritems(): + for key, val in analysis_result_per_file.items(): matching_lines, expected_lines = val out_file.write("\n-----------Matches found in file:'%s'-----------\n" % key) @@ -577,7 +577,7 @@ def write_summary_file(run_id, out_dir, analysis_result_per_file, unused_regex_m out_file.write("\nLOG ANALYSIS SUMMARY\n") total_match_cnt = 0 total_expect_cnt = 0 - for key, val in analysis_result_per_file.iteritems(): + for key, val in analysis_result_per_file.items(): matching_lines, expecting_lines = val file_match_cnt = len(matching_lines) @@ -613,7 +613,7 @@ def main(argv): opts, args = getopt.getopt(argv, "a:r:s:l:o:m:i:e:vh", ["action=", "run_id=", "start_marker=", "logs=", "out_dir=", "match_files_in=", "ignore_files_in=", "expect_files_in=", "verbose", "help"]) except getopt.GetoptError: - print "Invalid option specified" + print("Invalid option specified") usage() sys.exit(err_invalid_input) @@ -655,7 +655,7 @@ def main(argv): analyzer = AnsibleLogAnalyzer(run_id, verbose, start_marker) - log_file_list = filter(None, log_files_in.split(tokenizer)) + log_file_list = list(filter(None, log_files_in.split(tokenizer))) result = {} if (action == "init"): @@ -686,7 +686,7 @@ def main(argv): return 0 else: - print 'Unknown action:%s specified' % action + print('Unknown action:%s specified' % action) return len(result) #--------------------------------------------------------------------- From 36f48bbf754dfef4b67f59aedc4e9bc771f65961 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Sat, 7 Aug 2021 06:38:07 +0800 Subject: [PATCH 062/117] Lower the no allure no report url message from error to info (#3979) When allure server is not configured, there is always an error level message "Can not get Allure report URL." shown in teminal summary log. It looks scary and could be confusing. This change lower the message level from error to info. 
Signed-off-by: Xin Wang --- tests/common/plugins/allure_server/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/plugins/allure_server/__init__.py b/tests/common/plugins/allure_server/__init__.py index ab7fd05cd6f..2d04262233c 100644 --- a/tests/common/plugins/allure_server/__init__.py +++ b/tests/common/plugins/allure_server/__init__.py @@ -61,7 +61,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): if report_url: logger.info('Allure report URL: {}'.format(report_url)) else: - logger.error('Can not get Allure report URL. Please check logs') + logger.info('Can not get Allure report URL. Please check logs') def get_setup_session_info(session): From c27bd588a7c51ee4b4897daa3e4752bfb996a907 Mon Sep 17 00:00:00 2001 From: vmittal-msft <46945843+vmittal-msft@users.noreply.github.com> Date: Fri, 6 Aug 2021 16:02:35 -0700 Subject: [PATCH 063/117] TH3 test parameters for Dell Z9332 systems (#3947) --- ansible/group_vars/sonic/variables | 1 + tests/qos/files/qos.yml | 152 +++++++++++++++++++++++++++++ tests/qos/qos_sai_base.py | 4 +- 3 files changed, 155 insertions(+), 2 deletions(-) diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 26e43da649c..3a708b358d1 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -10,6 +10,7 @@ broadcom_td2_hwskus: ['Force10-S6000', 'Force10-S6000-Q24S32', 'Arista-7050-QX32 broadcom_td3_hwskus: ['Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8'] broadcom_th_hwskus: ['Force10-S6100', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32S-C32-T1', 'Arista-7060CX-32S-D48C8', 'Celestica-DX010-C32', "Seastone-DX010" ] broadcom_th2_hwskus: ['Arista-7260CX3-D108C8', 'Arista-7260CX3-C64', 'Arista-7260CX3-Q64'] +broadcom_th3_hwskus: ['DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-M-O32'] mellanox_spc1_hwskus: [ 'ACS-MSN2700', 'ACS-MSN2740', 'ACS-MSN2100', 'ACS-MSN2410', 'ACS-MSN2010', 'Mellanox-SN2700', 
'Mellanox-SN2700-D48C8' ] mellanox_spc2_hwskus: [ 'ACS-MSN3700', 'ACS-MSN3700C', 'ACS-MSN3800', 'Mellanox-SN3800-D112C8' , 'ACS-MSN3420'] diff --git a/tests/qos/files/qos.yml b/tests/qos/files/qos.yml index 84ebf6bb652..14810c87f26 100644 --- a/tests/qos/files/qos.yml +++ b/tests/qos/files/qos.yml @@ -1311,3 +1311,155 @@ qos_params: cell_size: 208 hdrm_pool_wm_multiplier: 4 cell_size: 208 + th3: + topo-any: + 100000_300m: + pkts_num_leak_out: 92 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22058 + pkts_num_trig_ingr_drp: 22520 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 22058 + pkts_num_trig_ingr_drp: 22520 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + dst_port_id: 20 + pgs_num: 37 + pkts_num_trig_pfc: 2634 + pkts_num_hdrm_full: 462 + pkts_num_hdrm_partial: 384 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22058 + pkts_num_trig_ingr_drp: 22520 + cell_size: 254 + pkts_num_margin: 2 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22058 + pkts_num_dismiss_pfc: 7 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 22058 + pkts_num_dismiss_pfc: 7 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_trig_egr_drp: 73501 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_fill_min: 10 + pkts_num_trig_pfc: 22058 + packet_size: 64 + cell_size: 254 + wm_pg_shared_lossy: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 73501 + packet_size: 64 + cell_size: 254 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 22520 + cell_size: 254 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 7 + pkts_num_trig_pfc: 22058 + pkts_num_trig_ingr_drp: 22520 + pkts_num_fill_egr_min: 8 + cell_size: 254 + wm_q_shared_lossy: + dscp: 8 + ecn: 1 + queue: 0 + pkts_num_fill_min: 7 + pkts_num_trig_egr_drp: 73501 + 
cell_size: 256 + wm_buf_pool_lossy: + dscp: 8 + ecn: 1 + pg: 0 + queue: 0 + pkts_num_fill_ingr_min: 0 + pkts_num_trig_egr_drp: 73501 + pkts_num_fill_egr_min: 7 + cell_size: 254 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 254 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 254 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 254 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 254 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + wrr_chg: + ecn: 1 + q0_num_of_pkts: 80 + q1_num_of_pkts: 80 + q2_num_of_pkts: 80 + q3_num_of_pkts: 300 + q4_num_of_pkts: 300 + q5_num_of_pkts: 80 + q6_num_of_pkts: 80 + limit: 80 + lossy_weight: 8 + lossless_weight: 30 + hdrm_pool_wm_multiplier: 1 + cell_size: 254 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index ca04f6440eb..0d55f5ce52e 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -16,10 +16,10 @@ class QosBase: """ Common APIs """ - SUPPORTED_T0_TOPOS = ["t0", "t0-64", "t0-116", "t0-35", "dualtor-56", "dualtor"] + SUPPORTED_T0_TOPOS = ["t0", "t0-64", "t0-116", "t0-35", "dualtor-56", "dualtor", "t0-80"] SUPPORTED_T1_TOPOS = {"t1-lag", "t1-64-lag"} SUPPORTED_PTF_TOPOS = ['ptf32', 'ptf64'] - SUPPORTED_ASIC_LIST = ["td2", "th", "th2", "spc1", "spc2", "spc3", "td3"] + SUPPORTED_ASIC_LIST = ["td2", "th", "th2", "spc1", "spc2", "spc3", "td3", "th3"] TARGET_QUEUE_WRED = 3 TARGET_LOSSY_QUEUE_SCHED = 0 From c83c58c60fcab3ca25d583eb05e2bf19cd3cc954 Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Fri, 6 Aug 2021 21:46:35 -0700 Subject: [PATCH 064/117] [common]: add skip_release to skip certains for tests skip_release is more robust than skip_version Signed-off-by: Guohan Lu --- 
tests/common/devices/sonic.py | 26 +++++++++++++++++++++++++- tests/common/devices/sonic_asic.py | 8 ++++++-- tests/common/utilities.py | 12 ++++++++++++ 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 57ba5c621a2..3ca368f9edb 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -69,6 +69,7 @@ def __init__(self, ansible_adhoc, hostname, self._facts = self._gather_facts() self._os_version = self._get_os_version() + self._sonic_release = self._get_sonic_release() self.is_multi_asic = True if self.facts["num_asic"] > 1 else False self._kernel_version = self._get_kernel_version() @@ -106,6 +107,17 @@ def os_version(self): return self._os_version + @property + def sonic_release(self): + """ + The SONiC release running on this SONiC device. + + Returns: + str: The SONiC release (e.g. "202012") + """ + + return self._sonic_release + @property def kernel_version(self): """ @@ -270,6 +282,18 @@ def _get_os_version(self): output = self.command("sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version") return output["stdout_lines"][0].strip() + def _get_sonic_release(self): + """ + Gets the SONiC Release that is running on this device. + E.g. 202106, 202012, ... + if the release is master, then return none + """ + + output = self.command("sonic-cfggen -y /etc/sonic/sonic_version.yml -v release") + if len(output['stdout_lines']) == 0: + return 'none' + return output["stdout_lines"][0].strip() + def _get_kernel_version(self): """ Gets the SONiC kernel version @@ -1467,7 +1491,7 @@ def get_auto_negotiation_mode(self, interface_name): interface_name (str): Interface name Returns: - boolean: True if auto negotiation mode is enabled else False. Return None if + boolean: True if auto negotiation mode is enabled else False. Return None if the auto negotiation mode is unknown or unsupported. 
""" cmd = 'sonic-db-cli APPL_DB HGET \"PORT_TABLE:{}\" \"{}\"'.format(interface_name, 'autoneg') diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py index a9d4d8bf0a1..68f9cfbdd4a 100644 --- a/tests/common/devices/sonic_asic.py +++ b/tests/common/devices/sonic_asic.py @@ -157,6 +157,10 @@ def get_ip_route_info(self, dstip): def os_version(self): return self.sonichost.os_version + @property + def sonic_release(self): + return self.sonichost.sonic_release + def interface_facts(self, *module_args, **complex_args): """Wrapper for the interface_facts ansible module. @@ -402,7 +406,7 @@ def port_exists(self, port): return port in self.ports if_db = self.show_interface( - command="status", + command="status", include_internal_intfs=True )["ansible_facts"]["int_status"] @@ -518,7 +522,7 @@ def get_portchannel_and_members_in_ns(self, tbinfo): pc = k pc_members = mg_facts['minigraph_portchannels'][pc]['members'] break - + return pc, pc_members def get_bgp_statistic(self, stat): diff --git a/tests/common/utilities.py b/tests/common/utilities.py index eb691a5bfde..62610b5ad6e 100644 --- a/tests/common/utilities.py +++ b/tests/common/utilities.py @@ -33,6 +33,18 @@ def skip_version(duthost, version_list): if any(version in duthost.os_version for version in version_list): pytest.skip("DUT has version {} and test does not support {}".format(duthost.os_version, ", ".join(version_list))) +def skip_release(duthost, release_list): + """ + @summary: Skip current test if any given release keywords are in os_version, match sonic_release. + skip_release is more robust than skip_version. 
+ @param duthost: The DUT + @param release_list: A list of incompatible releases + """ + if any(release in duthost.os_version for release in release_list): + pytest.skip("DUT has version {} and test does not support {}".format(duthost.os_version, ", ".join(release_list))) + + if any(release == duthost.sonic_release for release in release_list): + pytest.skip("DUT is release {} and test does not support {}".format(duthost.sonic_release, ", ".join(release_list))) def wait(seconds, msg=""): """ From 0e7ca9f62fbea3ccabf7bd0f1f8f1ebbc454d75e Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Fri, 6 Aug 2021 21:47:37 -0700 Subject: [PATCH 065/117] [dhcpv6_relay]: add skip_release 201811, 201911, 202012 Signed-off-by: Guohan Lu --- tests/dhcp_relay/test_dhcpv6_relay.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/dhcp_relay/test_dhcpv6_relay.py b/tests/dhcp_relay/test_dhcpv6_relay.py index e699bfffdb0..18d04ed8fba 100644 --- a/tests/dhcp_relay/test_dhcpv6_relay.py +++ b/tests/dhcp_relay/test_dhcpv6_relay.py @@ -6,6 +6,7 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.utilities import skip_release from tests.ptf_runner import ptf_runner pytestmark = [ @@ -109,6 +110,7 @@ def test_dhcp_relay_default(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp_r For each DHCP relay agent running on the DuT, verify DHCP packets are relayed properly """ duthost = duthosts[rand_one_dut_hostname] + skip_release(duthost, ["201811", "201911", "202012"]) for dhcp_relay in dut_dhcp_relay_data: # Run the DHCP relay test on the PTF host @@ -134,6 +136,7 @@ def test_dhcp_relay_after_link_flap(ptfhost, duthosts, rand_one_dut_hostname, du then test whether the DHCP relay agent relays packets properly. 
""" duthost = duthosts[rand_one_dut_hostname] + skip_release(duthost, ["201811", "201911", "202012"]) for dhcp_relay in dut_dhcp_relay_data: # Bring all uplink interfaces down @@ -174,6 +177,7 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, duthosts, rand_one_dut_host relays packets properly. """ duthost = duthosts[rand_one_dut_hostname] + skip_release(duthost, ["201811", "201911", "202012"]) for dhcp_relay in dut_dhcp_relay_data: # Bring all uplink interfaces down From 8736a61f99d48b8e1ccd34502683d054502b876d Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Sat, 7 Aug 2021 18:23:15 +0800 Subject: [PATCH 066/117] [test_crm] Enable storage backend topologies (#3980) Let crm_interface select interfaces out of vlan sub interfaces. Signed-off-by: Longxiang Lyu --- tests/crm/conftest.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/crm/conftest.py b/tests/crm/conftest.py index 934817e5b74..85d6b7a7503 100755 --- a/tests/crm/conftest.py +++ b/tests/crm/conftest.py @@ -91,7 +91,10 @@ def crm_interface(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, e asichost = duthost.asic_instance(enum_frontend_asic_index) mg_facts = asichost.get_extended_minigraph_facts(tbinfo) - if len(mg_facts["minigraph_portchannel_interfaces"]) >= 4: + if "backend" in tbinfo["topo"]["name"]: + crm_intf1 = mg_facts["minigraph_vlan_sub_interfaces"][0]["attachto"] + crm_intf2 = mg_facts["minigraph_vlan_sub_interfaces"][2]["attachto"] + elif len(mg_facts["minigraph_portchannel_interfaces"]) >= 4: crm_intf1 = mg_facts["minigraph_portchannel_interfaces"][0]["attachto"] crm_intf2 = mg_facts["minigraph_portchannel_interfaces"][2]["attachto"] else: From f1569129203e9b9c22ea73fae8f19c8d3333ef44 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Sun, 8 Aug 2021 10:11:48 +0800 Subject: [PATCH 067/117] [BGP] add `test_bgp_gr_helper_all_routes_preserved` (#3972) Approach What 
is the motivation for this PR? Existing BGP GR testcase test_bgp_gr_helper_routes_perserved only checks for default route presence during graceful-restart. This has two limitations: 1. not support topologies that have no default route(t0-backend and t1-backend) 2. doesn't check other generic routes received/learned from the neighbor experiencing a BGP restart. Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you do it? Add new test case bgp/test_bgp_gr_helper.py::test_bgp_gr_helper_all_routes_preserved to test that those routes received/learned from one neighbor will be kept during the BGP restart, and they should all be in the STALE state. How did you verify/test it? --- tests/bgp/test_bgp_gr_helper.py | 119 ++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/tests/bgp/test_bgp_gr_helper.py b/tests/bgp/test_bgp_gr_helper.py index 4399fa53f12..f346054d7bf 100644 --- a/tests/bgp/test_bgp_gr_helper.py +++ b/tests/bgp/test_bgp_gr_helper.py @@ -1,8 +1,13 @@ import pytest import logging import ipaddress +import random +import json + from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until +from tests.common.utilities import is_ipv4_address + pytestmark = [ pytest.mark.topology('any'), @@ -114,3 +119,117 @@ def test_bgp_gr_helper_routes_perserved(duthosts, rand_one_dut_hostname, nbrhost # Verify no route changes in the application db # TODO + + +def test_bgp_gr_helper_all_routes_preserved(duthosts, rand_one_dut_hostname, nbrhosts, setup_bgp_graceful_restart, tbinfo): + """Verify that routes received from one neighbor are all preserved during peer graceful restart.""" + + def _find_test_bgp_neighbors(test_neighbor_name, bgp_neighbors): + """Find test BGP neighbor peers.""" + test_bgp_neighbors = [] + for bgp_neighbor, neighbor_details in bgp_neighbors.items(): + if test_neighbor_name == neighbor_details['name']: + test_bgp_neighbors.append(bgp_neighbor) + return test_bgp_neighbors + + def 
_get_rib(duthost): + """Return DUT rib.""" + routes = {} + for namespace in duthost.get_frontend_asic_namespace_list(): + bgp_cmd_ipv4 = "vtysh -c \"show bgp ipv4 json\"" + bgp_cmd_ipv6 = "vtysh -c \"show bgp ipv6 json\"" + cmd = duthost.get_vtysh_cmd_for_namespace(bgp_cmd_ipv4, namespace) + routes.update(json.loads(duthost.shell(cmd, verbose=False)['stdout'])["routes"]) + cmd = duthost.get_vtysh_cmd_for_namespace(bgp_cmd_ipv6, namespace) + routes.update(json.loads(duthost.shell(cmd, verbose=False)['stdout'])["routes"]) + return routes + + def _get_learned_bgp_routes_from_neighbor(duthost, bgp_neighbor): + """Get all learned routes from the BGP neighbor.""" + routes = {} + if is_ipv4_address(unicode(bgp_neighbor)): + cmd = "vtysh -c 'show bgp ipv4 neighbor %s routes json'" % bgp_neighbor + else: + cmd = "vtysh -c 'show bgp ipv6 neighbor %s routes json'" % bgp_neighbor + for namespace in duthost.get_frontend_asic_namespace_list(): + cmd = duthost.get_vtysh_cmd_for_namespace(cmd, namespace) + routes.update(json.loads(duthost.shell(cmd, verbose=False)["stdout"])["routes"]) + return routes + + def _verify_prefix_counters_from_neighbor_after_graceful_restart(duthost, bgp_neighbor): + """Verify that all routes received from neighbor are stale after graceful restart.""" + if is_ipv4_address(unicode(bgp_neighbor)): + cmd = "vtysh -c 'show bgp ipv4 neighbor %s prefix-counts json'" % bgp_neighbor + else: + cmd = "vtysh -c 'show bgp ipv6 neighbor %s prefix-counts json'" % bgp_neighbor + for namespace in duthost.get_frontend_asic_namespace_list(): + cmd = duthost.get_vtysh_cmd_for_namespace(cmd, namespace) + cmd_result = json.loads(duthost.shell(cmd, verbose=False)["stdout"]) + logging.debug("Prefix counters for bgp neighbor %s in namespace %s:\n%s\n", bgp_neighbor, namespace, cmd_result) + assert cmd_result["ribTableWalkCounters"]["Stale"] == cmd_result["ribTableWalkCounters"]["All RIB"] + + def _verify_bgp_neighbor_routes_after_graceful_restart(neighbor_routes, rib): + for 
prefix, nexthops in neighbor_routes.items(): + if prefix not in rib: + pytest.fail("Route to prefix %s doesn't exist after graceful restart." % prefix) + nexthop_expected = nexthops[0] + bgp_neighbor_expected = nexthop_expected["peerId"] + for nexthop in rib[prefix]: + if nexthop["peerId"] == bgp_neighbor_expected: + if nexthop.get("stale", False) is False: + pytest.fail( + "Route to prefix %s should be stale after graceful restart, before: %s, after: %s" % (prefix, nexthop_expected, rib[prefix]) + ) + break + else: + pytest.fail( + "Route to prefix doesn't originate from BGP neighbor %s, before: %s, after: %s" % (bgp_neighbor_expected, nexthop_expected, rib[prefix]) + ) + + duthost = duthosts[rand_one_dut_hostname] + + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) + dev_nbrs = config_facts.get('DEVICE_NEIGHBOR', {}) + + test_interface = random.sample([k for k, v in dev_nbrs.items() if not v['name'].startswith("Server")], 1)[0] + test_neighbor_name = dev_nbrs[test_interface]['name'] + test_neighbor_host = nbrhosts[test_neighbor_name]['host'] + + # get neighbor BGP peers + test_bgp_neighbors = _find_test_bgp_neighbors(test_neighbor_name, bgp_neighbors) + + logging.info("Select neighbor %s to verify that all bgp routes are preserved during graceful restart", test_neighbor_name) + + # get all routes received from neighbor before GR + all_neighbor_routes_before_gr = {} + for test_bgp_neighbor in test_bgp_neighbors: + all_neighbor_routes_before_gr.update(_get_learned_bgp_routes_from_neighbor(duthost, test_bgp_neighbor)) + # limit testing routes to 100 entries to save time + test_route_count = min(100, len(all_neighbor_routes_before_gr)) + neighbor_routes_before_gr = dict(random.sample(all_neighbor_routes_before_gr.items(), test_route_count)) + + try: + # shutdown Rib agent, starting GR process + logger.info("shutdown rib process on neighbor 
{}".format(test_neighbor_name)) + test_neighbor_host.kill_bgpd() + + # wait till DUT enters NSF state + for test_bgp_neighbor in test_bgp_neighbors: + pytest_assert( + wait_until(60, 5, duthost.check_bgp_session_nsf, test_bgp_neighbor), + "neighbor {} does not enter NSF state".format(test_bgp_neighbor) + ) + + # confirm routes from the neighbor still there + rib_after_gr = _get_rib(duthost) + for test_bgp_neighbor in test_bgp_neighbors: + _verify_prefix_counters_from_neighbor_after_graceful_restart(duthost, test_bgp_neighbor) + _verify_bgp_neighbor_routes_after_graceful_restart(neighbor_routes_before_gr, rib_after_gr) + finally: + # start Rib agent + logging.info("start rib process on neighbor %s", test_neighbor_name) + test_neighbor_host.start_bgpd() + + if not wait_until(300, 10, duthost.check_bgp_session_state, test_bgp_neighbors): + pytest.fail("Not all bgp sessions are up after starting BGP on neighbor %s." % test_neighbor_name) From 623c31611d90dd430487242fe5c1a73eb53c6f35 Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Sun, 8 Aug 2021 10:07:05 -0700 Subject: [PATCH 068/117] [kvmtest]: disable test_bgp_gr_helper_all_routes_preserved temporarily test seems flaky. 
Signed-off-by: Guohan Lu --- tests/kvmtest.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh index 69a2d9dc829..a26f659aaf1 100755 --- a/tests/kvmtest.sh +++ b/tests/kvmtest.sh @@ -99,7 +99,7 @@ test_t0() { test_interfaces.py \ arp/test_arp_dualtor.py \ bgp/test_bgp_fact.py \ - bgp/test_bgp_gr_helper.py \ + bgp/test_bgp_gr_helper.py::test_bgp_gr_helper_routes_perserved \ bgp/test_bgp_speaker.py \ bgp/test_bgp_update_timer.py \ cacl/test_ebtables_application.py \ @@ -247,7 +247,7 @@ if [ x$test_suite == x"t0" ]; then elif [ x$test_suite == x"t1-lag" ]; then test_t1_lag elif [ x$test_suite == x"multi-asic-t1-lag" ]; then - test_multi_asic_t1_lag + test_multi_asic_t1_lag elif [ x$test_suite == x"t2" ]; then test_t2 else From a74ff7e959e1a9ac2ee44e8581366c08508079c4 Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Mon, 9 Aug 2021 11:16:22 +0800 Subject: [PATCH 069/117] Fix case senstive issue for test_show_platform_syseeprom (#3807) What is the motivation for this PR? In test_show_platform_syseeprom, we can get the syseeprom from two ways. One is from show platform syseeprom like below. The 'Code' column is upper case: show platform syseeprom TlvInfo Header: Id String: TlvInfo Version: 1 Total Length: 627 TLV Name Code Len Value -------------------- ---- --- ----- Product Name 0x21 64 MSN4410 Part Number 0x22 20 MSN4410-WS2FO Serial Number 0x23 24 MT2039X06760 Base MAC Address 0x24 6 1C:34:DA:23:45:00 Manufacture Date 0x25 19 09/23/2020 19:26:56 Device Version 0x26 1 0 Platform Name 0x28 64 x86_64-mlnx_msn4410-r0 ONIE Version 0x29 21 2020.11-5.3.0005-9600 MAC Addresses 0x2A 2 254 Manufacturer 0x2B 8 Mellanox Vendor Extension 0xFD 36 Vendor Extension 0xFD 164 Vendor Extension 0xFD 36 Vendor Extension 0xFD 36 Vendor Extension 0xFD 36 Vendor Extension 0xFD 52 CRC-32 0xFE 4 0x108A42F6 The other one is from dut_vars like below. 
The key is lower case: {'0x28': u'x86_64-mlnx_msn4410-r0', '0x29': u'2020.11-5.3.0005-9600', '0x22': u'MSN4410-WS2FO', '0x23': u'MT2039X06760', '0x21': u'MSN4410', '0x26': u'0', '0x24': u'1C:34:DA:23:45:00', '0x25': u'09/23/2020 19:26:56', '0xfe': u'0x108A42F6', '0x2b': u'Mellanox', '0x2a': u'254'} So when compare the code key like follow, it will fail. for field in expected_syseeprom_info_dict: pytest_assert(field in parsed_syseeprom, "Expected field '{}' not present in syseeprom on '{}'".format(field, duthost.hostname)) How did you do it? Use all lower case field name in the result dict while parsing the output of "show platform syseeprom". --- tests/platform_tests/cli/test_show_platform.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/platform_tests/cli/test_show_platform.py b/tests/platform_tests/cli/test_show_platform.py index eaba87696a9..45bce99807b 100644 --- a/tests/platform_tests/cli/test_show_platform.py +++ b/tests/platform_tests/cli/test_show_platform.py @@ -130,13 +130,14 @@ def test_show_platform_syseeprom(duthosts, enum_rand_one_per_hwsku_hostname, dut for line in syseeprom_output_lines[6:]: t1 = regex_int.match(line) if t1: - parsed_syseeprom[t1.group(2).strip()] = t1.group(4).strip() + tlv_code_lower_case = t1.group(2).strip().lower() + parsed_syseeprom[tlv_code_lower_case] = t1.group(4).strip() for field in expected_syseeprom_info_dict: - pytest_assert(field in parsed_syseeprom, "Expected field '{}' not present in syseeprom on '{}'".format(field, duthost.hostname)) - pytest_assert(parsed_syseeprom[field] == expected_syseeprom_info_dict[field], + pytest_assert(field.lower() in parsed_syseeprom, "Expected field '{}' not present in syseeprom on '{}'".format(field, duthost.hostname)) + pytest_assert(parsed_syseeprom[field.lower()] == expected_syseeprom_info_dict[field], "System EEPROM info is incorrect - for '{}', rcvd '{}', expected '{}' on '{}'". 
- format(field, parsed_syseeprom[field], expected_syseeprom_info_dict[field], duthost.hostname)) + format(field, parsed_syseeprom[field.lower()], expected_syseeprom_info_dict[field], duthost.hostname)) if duthost.facts["asic_type"] in ["mellanox"]: expected_fields = [ From 2024e7b3c260866de0052380f3ded1d198713b83 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Mon, 9 Aug 2021 08:10:35 -0700 Subject: [PATCH 070/117] [everflow] Skip everflow tests on backend topology (#3975) Everflow tests should not be run on backend topology Fixes #3976 Signed-off-by: Neetha John How did you verify/test it? SKIPPED [60] /var/nejo/Networking-acs-sonic-mgmt/tests/everflow/conftest.py:16: Skipping everflow tests. Unsupported topology t1-backend --- tests/everflow/conftest.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 tests/everflow/conftest.py diff --git a/tests/everflow/conftest.py b/tests/everflow/conftest.py new file mode 100644 index 00000000000..0ee69e7b002 --- /dev/null +++ b/tests/everflow/conftest.py @@ -0,0 +1,15 @@ +import pytest + +@pytest.fixture(scope="module", autouse=True) +def skip_everflow_test(tbinfo): + """ + Skip everflow tests on certain testbed types + + Args: + tbinfo(fixture): testbed related info fixture + + Yields: + None + """ + if 'backend' in tbinfo['topo']['name']: + pytest.skip("Skipping everflow tests. Unsupported topology {}".format(tbinfo['topo']['name'])) From 6e632621c4aff682eb6888d8abd0d5d3568e1988 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Mon, 9 Aug 2021 08:13:08 -0700 Subject: [PATCH 071/117] [dhcp_relay] Skip dhcp relay tests on backend topology (#3977) Fixes #3851 No dhcp relay config on backend. Hence these tests need to be skipped on backend topology Signed-off-by: Neetha John How did you verify/test it? Verified that these tests are skipped on both 't0-backend' and 't1-backend' SKIPPED [4] /var/nejo/Networking-acs-sonic-mgmt/tests/dhcp_relay/conftest.py:16: Skipping dhcp relay tests. 
Unsupported topology t1-backend SKIPPED [10] dhcp_relay/test_dhcp_relay.py: test requires topology in Mark(name='topology', args=('t0',), kwargs={}) SKIPPED [10] dhcp_relay/test_dhcp_relay.py: test requires topology in Mark(name='topology', args=('t0',), kwargs={}) SKIPPED [4] /var/nejo/Networking-acs-sonic-mgmt/tests/dhcp_relay/conftest.py:16: Skipping dhcp relay tests. Unsupported topology t0-backend --- tests/dhcp_relay/conftest.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 tests/dhcp_relay/conftest.py diff --git a/tests/dhcp_relay/conftest.py b/tests/dhcp_relay/conftest.py new file mode 100644 index 00000000000..00dfd74a812 --- /dev/null +++ b/tests/dhcp_relay/conftest.py @@ -0,0 +1,15 @@ +import pytest + +@pytest.fixture(scope="module", autouse=True) +def skip_dhcp_relay_tests(tbinfo): + """ + Skip dhcp relay tests on certain testbed types + + Args: + tbinfo(fixture): testbed related info fixture + + Yields: + None + """ + if 'backend' in tbinfo['topo']['name']: + pytest.skip("Skipping dhcp relay tests. Unsupported topology {}".format(tbinfo['topo']['name'])) From ca2017396e520e69bcddcca8e1a91a3b43c7fd4a Mon Sep 17 00:00:00 2001 From: Neetha John Date: Mon, 9 Aug 2021 08:20:15 -0700 Subject: [PATCH 072/117] [pfcwd] Skip pfcwd warm reboot tests on Td2 (#3983) Warm reboot is not supported on Td2. Hence this test should be skipped Fixes #3984 Signed-off-by: Neetha John How did you verify/test it? 
platform linux2 -- Python 2.7.12, pytest-4.6.9, py-1.8.1, pluggy-0.13.1 ansible: 2.8.7 rootdir: /var/nejo/Networking-acs-sonic-mgmt/tests, inifile: pytest.ini plugins: ansible-2.2.2 collected 3 items pfcwd/test_pfcwd_warm_reboot.py::TestPfcwdWb::test_pfcwd_wb[no_storm-None] SKIPPED [ 33%] pfcwd/test_pfcwd_warm_reboot.py::TestPfcwdWb::test_pfcwd_wb[storm-None] SKIPPED [ 66%] pfcwd/test_pfcwd_warm_reboot.py::TestPfcwdWb::test_pfcwd_wb[async_storm-None] SKIPPED [100%] SKIPPED [3] /var/nejo/Networking-acs-sonic-mgmt/tests/common/helpers/assertions.py:13: Warm reboot is not supported on td2 --- tests/pfcwd/test_pfcwd_warm_reboot.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tests/pfcwd/test_pfcwd_warm_reboot.py b/tests/pfcwd/test_pfcwd_warm_reboot.py index b5db0d9482c..d6ba73a7cd8 100644 --- a/tests/pfcwd/test_pfcwd_warm_reboot.py +++ b/tests/pfcwd/test_pfcwd_warm_reboot.py @@ -6,8 +6,9 @@ import time import traceback +from tests.common.broadcom_data import is_broadcom_device from tests.common.fixtures.conn_graph_facts import fanout_graph_facts -from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.helpers.pfc_storm import PFCStorm from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer from tests.common.reboot import reboot @@ -37,6 +38,23 @@ logger = logging.getLogger(__name__) +@pytest.fixture(scope="module", autouse=True) +def skip_pfcwd_wb_tests(duthosts, rand_one_dut_hostname): + """ + Skip Pfcwd warm reboot tests on certain asics + + Args: + duthosts (pytest fixture): list of Duts + rand_one_dut_hostname (str): hostname of DUT + + Returns: + None + """ + duthost = duthosts[rand_one_dut_hostname] + SKIP_LIST = ["td2"] + asic_type = duthost.get_asic_name() + pytest_require(not (is_broadcom_device(duthost) and asic_type in SKIP_LIST), "Warm reboot is not supported on {}".format(asic_type)) + 
@pytest.fixture(autouse=True) def setup_pfcwd(duthosts, rand_one_dut_hostname): """ From 894e670b035d359630cf7c784eaefd9b1564fa93 Mon Sep 17 00:00:00 2001 From: ppikh <70200079+ppikh@users.noreply.github.com> Date: Tue, 10 Aug 2021 06:08:54 +0300 Subject: [PATCH 073/117] Added possibility to skip tests dynamically by GitHub issue (#3577) Added possibility to skip tests dynamically by GitHub issue For now we can not ignore test case when we have GitHub issue opened. This commit allows to ignore tests dynamically based on GitHub issue status. What is the motivation for this PR? For now we can not ignore test case when we have GitHub issue opened. This commit allows to ignore tests dynamically based on GitHub issue status. How did you do it? Implemented plugin which allow us to query GitHub api and skip test if we have active GitHub issue. How did you verify/test it? Executed pytest tests when plugin active and when plugin is not active(when no ignore file) Any platform specific information? Supported testbed topology if it's a new test case? 
Documentation Added README.md file Signed-off-by: Petro Pikh --- .../plugins/custom_skipif/CustomSkipIf.py | 216 ++++++++++++++++++ tests/common/plugins/custom_skipif/GitHub.py | 101 ++++++++ .../common/plugins/custom_skipif/Platform.py | 70 ++++++ tests/common/plugins/custom_skipif/README.md | 56 +++++ .../common/plugins/custom_skipif/__init__.py | 0 .../plugins/custom_skipif/credentials.yaml | 5 + ...s_to_be_skipped_conditionally.yaml.example | 122 ++++++++++ tests/conftest.py | 1 + 8 files changed, 571 insertions(+) create mode 100644 tests/common/plugins/custom_skipif/CustomSkipIf.py create mode 100644 tests/common/plugins/custom_skipif/GitHub.py create mode 100644 tests/common/plugins/custom_skipif/Platform.py create mode 100644 tests/common/plugins/custom_skipif/README.md create mode 100644 tests/common/plugins/custom_skipif/__init__.py create mode 100644 tests/common/plugins/custom_skipif/credentials.yaml create mode 100644 tests/common/plugins/custom_skipif/tests_to_be_skipped_conditionally.yaml.example diff --git a/tests/common/plugins/custom_skipif/CustomSkipIf.py b/tests/common/plugins/custom_skipif/CustomSkipIf.py new file mode 100644 index 00000000000..bf523389d8b --- /dev/null +++ b/tests/common/plugins/custom_skipif/CustomSkipIf.py @@ -0,0 +1,216 @@ +import multiprocessing +import yaml +import pytest +import logging +import os +import sys + +from abc import ABCMeta, abstractmethod + +logger = logging.getLogger() + +CUSTOM_SKIP_IF_DICT = 'custom_skip_if_dict' +CUSTOM_TEST_SKIP_PLATFORM_TYPE = 'dynamic_tests_skip_platform_type' +PLATFORM = 'Platform' + + +def pytest_collection(session): + initialize_cached_variables(session) + + +def initialize_cached_variables(session): + session.config.cache.set(CUSTOM_SKIP_IF_DICT, None) + session.config.cache.set(CUSTOM_TEST_SKIP_PLATFORM_TYPE, None) + + +def pytest_runtest_setup(item): + """ + Skip tests conditionally based on the user_tests_to_be_skipped list + """ + skip_tests_file_path = 
get_tests_to_be_skipped_path() + if os.path.exists(skip_tests_file_path): + skip_tests_dict = read_skip_file(item, skip_tests_file_path) + update_syspath_for_dynamic_import() + + for test_prefix, skip_list_of_dicts in skip_tests_dict.items(): + if test_in_skip_list(item, test_prefix): + logger.debug('Found custom skip condition: {}'.format(test_prefix)) + make_skip_decision(skip_list_of_dicts, item) + + +def get_tests_to_be_skipped_path(skip_tests_file='tests_to_be_skipped_conditionally.yaml'): + """ + Get path to file with dynamic skip information + :param skip_tests_file: skip test file name + :return: full path to skip test file name + """ + custom_skip_folder_path = os.path.dirname(__file__) + custom_skip_tests_file_path = os.path.join(custom_skip_folder_path, skip_tests_file) + return custom_skip_tests_file_path + + +def read_skip_file(item, skip_tests_file_path): + """ + Read yaml file with list of test cases which should be skipped + :param item: pytest test item + :param skip_tests_file_path: path to file where stored list of test cases which should be skipped + :return: yaml loaded dictionary + """ + skip_dictionary = item.session.config.cache.get(CUSTOM_SKIP_IF_DICT, None) + if not skip_dictionary: + with open(skip_tests_file_path) as skip_data: + logger.debug('Reading dynamic skip file: {}'.format(skip_tests_file_path)) + skip_dictionary = yaml.load(skip_data, Loader=yaml.FullLoader) + item.session.config.cache.set(CUSTOM_SKIP_IF_DICT, skip_dictionary) + return skip_dictionary + + +def update_syspath_for_dynamic_import(): + """ + Update sys.path by current folder to have possibility to load python modules dynamically + """ + if os.path.dirname(__file__) not in sys.path: + sys.path.append(os.path.dirname(__file__)) + + +def test_in_skip_list(item, test_prefix): + """ + Check if current test in skip list + :param item: pytest test item + :param test_prefix: test prefix from ignore yaml file + :return: True/False + """ + return 
str(item.nodeid).startswith(test_prefix) + + +def make_skip_decision(skip_list_of_dicts, item): + """ + Make a final decision about whether to skip the test by combining the results of all the skip statements. + :param skip_list_of_dicts: list with data which we read from ignore yaml file + :param item: pytest test item + :return: None or pytest.skip in case when we need to skip test + """ + skip_result_list = [] + skip_reason_str = '' + + for skip_dict_entry in skip_list_of_dicts: + if is_nested_dict(skip_dict_entry): + skip_reason_str = update_skip_results(skip_dict_entry, item, 'and', skip_result_list, skip_reason_str) + else: + skip_reason_str = update_skip_results(skip_dict_entry, item, 'or', skip_result_list, skip_reason_str) + + # Make final decision + if any(skip_result_list): + pytest.skip(skip_reason_str) + + +def is_nested_dict(dict_obj): + nested_dict_min_len = 2 + return len(dict_obj) >= nested_dict_min_len + + +def update_skip_results(skip_dict, item, operand, skip_result_list, skip_reason_str): + """ + Get results from skip checkers and update skip_result_list and skip_reason_str + :param skip_dict: dictionary with data which we read from ignore yaml file + :param item: pytest test item + :param operand: operand which will be used between skip by items, can be "or", "and" + :param skip_result_list: list which we update according to checkers results + :param skip_reason_str: skip reason string which we update according to checkers results + :return: skip_reason_str - string which contains skip reason + """ + skip_required, skip_reason = get_checkers_result(skip_dict, item, operand) + skip_result_list.append(skip_required) + skip_reason_str += skip_reason + return skip_reason_str + + +def get_checkers_result(skip_dict, item, operand='or'): + """ + Get results about whether to skip the test by combining the results of all the skip statements. 
+ :param skip_dict: dictionary with skip test case skip conditions + :param item: pytest build-in + :param operand: operand which will be used to make decision about skip + :return: True/False and string with skip reason + """ + skip_reason = '' + checkers_result = [] + + skip_checkers_list = prepare_checkers(skip_dict, item) + skip_dict_result = run_checkers_in_parallel(skip_checkers_list) + + for checker, checker_result in skip_dict_result.items(): + if checker_result: + skip_reason += '\nTest skipped due to {}: {}'.format(checker, checker_result) + checkers_result.append(True) + else: + checkers_result.append(False) + + if operand == 'or': + skip_required = any(checkers_result) + else: + skip_required = all(checkers_result) + + if not skip_required: + skip_reason = '' + + return skip_required, skip_reason + + +def prepare_checkers(skip_dict, pytest_item_obj): + """ + Import dynamically checker modules and initialize them + :param skip_dict: dictionary with skip test case skip conditions + :param pytest_item_obj: pytest build in + :return: list with checkers objects + """ + skip_checkers_list = [] + for skip_by in skip_dict: + logger.debug('Importing dynamic skip module: {}'.format(skip_by)) + try: + skip_module_obj = __import__(skip_by).SkipIf(skip_dict[skip_by], pytest_item_obj) + skip_checkers_list.append(skip_module_obj) + except Exception as err: + logger.error('Unable to load dynamically skip object: {}'.format(err)) + return skip_checkers_list + + +def run_checkers_in_parallel(skip_checkers_list): + """ + Run checkers in parallel and return results + :param skip_checkers_list: list with checkers objects + :return: dictionary with checkers result + """ + manager = multiprocessing.Manager() + skip_dict_result = manager.dict() + + proc_list = list() + + for skip_check in skip_checkers_list: + skip_dict_result[skip_check.name] = None + proc_list.append(multiprocessing.Process(target=skip_check.is_skip_required, args=(skip_dict_result,))) + + for proc in 
proc_list: + proc.start() + for proc in proc_list: + proc.join(timeout=60) + + return skip_dict_result + + +class CustomSkipIf: + __metaclass__ = ABCMeta + + def __init__(self, ignore_list, pytest_item_obj): + # self.name = 'CustomSkipIf' # Example: Platform, Jira, Redmine - should be defined in each child class + self.ignore_list = ignore_list + self.pytest_item_obj = pytest_item_obj + + @abstractmethod + def is_skip_required(self, skip_dict_result): + """ + Decide whether or not to skip a test + :param skip_dict_result: shared dictionary with data about skip test + :return: updated skip_dict + """ + return skip_dict_result diff --git a/tests/common/plugins/custom_skipif/GitHub.py b/tests/common/plugins/custom_skipif/GitHub.py new file mode 100644 index 00000000000..a4a9ae92207 --- /dev/null +++ b/tests/common/plugins/custom_skipif/GitHub.py @@ -0,0 +1,101 @@ +import requests +import logging +import yaml +import os + +from CustomSkipIf import CustomSkipIf + +logger = logging.getLogger() + + +class SkipIf(CustomSkipIf): + def __init__(self, ignore_list, pytest_item_obj): + super(SkipIf, self).__init__(ignore_list, pytest_item_obj) + self.name = 'GitHub' + self.credentials = self.get_cred() + + def get_cred(self): + """ + Get GitHub API credentials + :return: dictionary with GitHub credentials {'user': aaa, 'api_token': 'bbb'} + """ + cred_file_name = 'credentials.yaml' + cred_folder_path = os.path.dirname(__file__) + cred_file_path = os.path.join(cred_folder_path, cred_file_name) + + with open(cred_file_path) as cred_file: + cred = yaml.load(cred_file, Loader=yaml.FullLoader).get(self.name) + + return cred + + def is_skip_required(self, skip_dict_result): + """ + Make decision about ignore - is it required or not + :param skip_dict_result: shared dictionary with data about skip test + :return: updated skip_dict_result + """ + github_api = GitHubApi(self.credentials.get('user'), self.credentials.get('api_token')) + + for github_issue in self.ignore_list: + if 
github_api.is_github_issue_active(github_issue): + skip_dict_result[self.name] = github_issue + break + return skip_dict_result + + +class GitHubApi: + """ + This class allows user to query github issues status + Usage example: + github = GitHubApi('user', 'api_token') + github.is_github_issue_active(github_issue) + """ + + def __init__(self, github_username, api_token): + self.auth = (github_username, api_token) + + @staticmethod + def get_github_issue_api_url(issue_url): + """ + Get correct github api URL based on browser URL from user + :param issue_url: github issue url + :return: github issue api url + """ + return issue_url.replace('github.com', 'api.github.com/repos') + + def make_github_request(self, url): + """ + Send API request to github + :param url: github api url + :return: dictionary with data + """ + response = requests.get(url, auth=self.auth) + response.raise_for_status() + return response.json() + + def is_github_issue_active(self, issue_url): + """ + Check that issue active or not + :param issue_url: github issue URL + :return: True/False + """ + issue_url = self.get_github_issue_api_url(issue_url) + response = self.make_github_request(issue_url) + if response.get('state') == 'closed': + if self.is_duplicate(response): + logger.warning('GitHub issue: {} looks like duplicate and was closed. 
Please re-check and ignore' + 'the test on the parent issue'.format(issue_url)) + return False + return True + + @staticmethod + def is_duplicate(issue_data): + """ + Check if issue duplicate or note + :param issue_data: github response dict + :return: True/False + """ + for label in issue_data['labels']: + if 'duplicate' in label['name'].lower(): + return True + return False diff --git a/tests/common/plugins/custom_skipif/Platform.py b/tests/common/plugins/custom_skipif/Platform.py new file mode 100644 index 00000000000..0ee7f6fbf99 --- /dev/null +++ b/tests/common/plugins/custom_skipif/Platform.py @@ -0,0 +1,70 @@ +import logging +import subprocess + +from CustomSkipIf import CustomSkipIf, CUSTOM_TEST_SKIP_PLATFORM_TYPE + +logger = logging.getLogger() + + +class SkipIf(CustomSkipIf): + def __init__(self, ignore_list, pytest_item_obj): + super(SkipIf, self).__init__(ignore_list, pytest_item_obj) + self.name = 'Platform' + + def is_skip_required(self, skip_dict_result): + """ + Make decision about ignore - is it required or not + :param skip_dict_result: shared dictionary with data about skip test + :return: updated skip_dict + """ + current_platform = get_platform_type(self.pytest_item_obj) + for platform in self.ignore_list: + if str(platform) in current_platform: + skip_dict_result[self.name] = platform + break + + return skip_dict_result + + +def get_platform_type(pytest_item_obj): + """ + Get current platform type using ansible and store it in pytest.session.config.cache + :param pytest_item_obj: pytest test item + :return: platform_type - string with current platform type + """ + platform_type = pytest_item_obj.session.config.cache.get(CUSTOM_TEST_SKIP_PLATFORM_TYPE, None) + if not platform_type: + logger.debug('Getting platform from DUT') + host = pytest_item_obj.session.config.option.ansible_host_pattern + inventory = pytest_item_obj.session.config.option.ansible_inventory + inv = get_inventory_argument(inventory) + show_platform_cmd = 'ansible {} {} -a 
"show platform summary"'.format(host, inv) + + try: + show_platform_summary_raw_output = subprocess.check_output(show_platform_cmd, shell=True) + for line in show_platform_summary_raw_output.splitlines(): + if 'Platform:' in line: + platform_type = line.split()[1:][0] # get platform, example: x86_64-mlnx_msn2700-r0 + pytest_item_obj.session.config.cache.set(CUSTOM_TEST_SKIP_PLATFORM_TYPE, platform_type) + break + except Exception as err: + logger.error('Unable to get platform type. Custom skip by platform impossible. Error: {}'.format(err)) + else: + logger.debug('Getting platform from pytest cache') + + logger.debug('Current platform type is: {}'.format(platform_type)) + return platform_type + + +def get_inventory_argument(inventory): + """Get Ansible inventory arguments""" + inv = '' + + if type(inventory) is list: + for inv_item in inventory: + inv += ' -i {}'.format(inv_item) + else: + for inv_item in inventory.split(','): + inv += ' -i {}'.format(inv_item) + + return inv diff --git a/tests/common/plugins/custom_skipif/README.md b/tests/common/plugins/custom_skipif/README.md new file mode 100644 index 00000000000..ac1922c3cea --- /dev/null +++ b/tests/common/plugins/custom_skipif/README.md @@ -0,0 +1,56 @@ +#### Custom_skipif plugin usage example + +Below is described possibility of custom_skipif plugin usage. + +##### Structure +custom_skipif plugin allows to skip test cases dynamically based on GitHub issue or Platform + +Plugin can use different issue/ticket systems: GitHub, Jira, Redmine etc. +By default implemented only GitHub support. Jira, Redmine etc. can be implemented by users. + +##### How it works +By default plugin will do nothing. +If in plugin folder available file "tests_to_be_skipped_conditionally.yaml" - it will read this file and do skip for +tests according to this file. 
+ +Example how to skip test "test platform_tests/cli/test_show_platform.py::test_show_platform_fan" +Code below will skip test if we have active issue in: ((GitHub or Jira or Redmine) and current platform is "msn4600") + +```` +platform_tests/cli/test_show_platform.py::test_show_platform_fan: + - GitHub: + - https://github.com/Azure/sonic-buildimage/issues/7602 + - https://github.com/Azure/sonic-buildimage/issues/7643 + Platform: + - msn4600 + - Jira: + - http://jira.aaa.com/1234 + Platform: + - msn4600 + - Redmine: + - http://redmine.bbb.com/1234 + Platform: + - msn4600 +```` + +##### How to add additional issues/tickets system support(Jira, Redmine, etc.) +To add support for additional issues/tickets system we need to do next(example below for Redmine): +- Create file called "Redmine.py" +- In file "Readmine.py" create class with name "SkipIf" and inherit it from "CustomSkipIf" class +```` +from CustomSkipIf import CustomSkipIf + +class SkipIf(CustomSkipIf): + def __init__(self, ignore_list, pytest_item_obj): + super(SkipIf, self).__init__(ignore_list, pytest_item_obj) + self.name = 'Redmine' + + def is_skip_required(self, skip_dict): + is_issue_active, issue_id = is_redmine_issue_active(self.ignore_list) + if is_issue_active: + issue_url = 'https://redmine.bbb.com/issues/{}'.format(issue_id) + skip_dict[self.name] = issue_url + + return skip_dict +```` +- Add to file "tests_to_be_skipped_conditionally.yaml" skip item for specific test case by Redmine issue id diff --git a/tests/common/plugins/custom_skipif/__init__.py b/tests/common/plugins/custom_skipif/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/common/plugins/custom_skipif/credentials.yaml b/tests/common/plugins/custom_skipif/credentials.yaml new file mode 100644 index 00000000000..c06693ce829 --- /dev/null +++ b/tests/common/plugins/custom_skipif/credentials.yaml @@ -0,0 +1,5 @@ +# Below is example - user should uncomment it and replace data by correct credentials + 
+#GitHub: +# user: petrop +# api_token: ghp_qwertyuiopasdfghjklzxcvbnm diff --git a/tests/common/plugins/custom_skipif/tests_to_be_skipped_conditionally.yaml.example b/tests/common/plugins/custom_skipif/tests_to_be_skipped_conditionally.yaml.example new file mode 100644 index 00000000000..63e1d9cb28d --- /dev/null +++ b/tests/common/plugins/custom_skipif/tests_to_be_skipped_conditionally.yaml.example @@ -0,0 +1,122 @@ +# It is example - of file with skip test cases by issue or by platform. +# Original skip test cases file should be named "tests_to_be_skipped_conditionally.yaml" + +## Ignore all tests in folder platform_tests/cli if any issue(GitHub or Jira or Redmine) active or platform msn4600 +#platform_tests/cli: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# - Jira: +# - http://jira.aaa.com/1234 +# - Redmine: +# - http://redmine.bbb.com/1234 +# - Platform: +# - msn4600 +# +## Ignore all tests in file platform_tests/cli/test_show_platform.py if any issue(GitHub or Jira or Redmine) active or platform msn4600 +#platform_tests/cli/test_show_platform.py: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# - Jira: +# - http://jira.aaa.com/1234 +# - Redmine: +# - http://redmine.bbb.com/1234 +# - Platform: +# - msn4600 +# +## Ignore specific test platform_tests/cli/test_show_platform.py::test_show_platform_fan if any issue(GitHub or Jira or Redmine) active or platform msn4600 +#platform_tests/cli/test_show_platform.py::test_show_platform_fan: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# - Jira: +# - http://jira.aaa.com/1234 +# - Redmine: +# - http://redmine.bbb.com/1234 +# - Platform: +# - msn4600 +# +## Ignore all tests in folder platform_tests/cli if any issue(GitHub or Jira or Redmine) active and platform msn4600 
+#platform_tests/cli: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# Platform: +# - msn4600 +# - Jira: +# - http://jira.aaa.com/1234 +# Platform: +# - msn4600 +# - Redmine: +# - http://redmine.bbb.com/1234 +# Platform: +# - msn4600 +# +## Ignore all tests in file platform_tests/cli/test_show_platform.py if any issue(GitHub or Jira or Redmine) active and platform msn4600 +#platform_tests/cli/test_show_platform.py: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# Platform: +# - msn4600 +# - Jira: +# - http://jira.aaa.com/1234 +# Platform: +# - msn4600 +# - Redmine: +# - http://redmine.bbb.com/1234 +# Platform: +# - msn4600 +# +## Ignore specific test platform_tests/cli/test_show_platform.py::test_show_platform_fan if any issue(GitHub or Jira or Redmine) active or platform msn4600 +#platform_tests/cli/test_show_platform.py::test_show_platform_fan: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# Platform: +# - msn4600 +# - Jira: +# - http://jira.aaa.com/1234 +# Platform: +# - msn4600 +# - Redmine: +# - http://redmine.bbb.com/1234 +# Platform: +# - msn4600 +# +## Ignore all tests in folder platform_tests/cli if platform msn4600 or msn3700 +#platform_tests/cli: +# - Platform: +# - msn4600 +# - msn3700 +# +## Ignore all tests in file platform_tests/cli/test_show_platform.py if platform msn4600 or msn3700 +#platform_tests/cli/test_show_platform.py: +# - Platform: +# - msn4600 +# - msn3700 +# +## Ignore specific test platform_tests/cli/test_show_platform.py::test_show_platform_fan if platform msn4600 or msn3700 +#platform_tests/cli/test_show_platform.py::test_show_platform_fan: +# - Platform: +# - msn4600 +# - msn3700 +# +## Ignore all tests in folder platform_tests/cli if any GitHub issue active +#platform_tests/cli: +# 
- GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# +## Ignore all tests in file platform_tests/cli/test_show_platform.py if any GitHub issue active +#platform_tests/cli/test_show_platform.py: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 +# +## Ignore specific test platform_tests/cli/test_show_platform.py::test_show_platform_fan if any GitHub issue active +#platform_tests/cli/test_show_platform.py::test_show_platform_fan: +# - GitHub: +# - https://github.com/Azure/sonic-buildimage/issues/7602 +# - https://github.com/Azure/sonic-buildimage/issues/7643 diff --git a/tests/conftest.py b/tests/conftest.py index dfef588440c..7b38fd05fb9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -48,6 +48,7 @@ 'tests.common.plugins.pdu_controller', 'tests.common.plugins.sanity_check', 'tests.common.plugins.custom_markers', + 'tests.common.plugins.custom_skipif.CustomSkipIf', 'tests.common.plugins.test_completeness', 'tests.common.plugins.log_section_start', 'tests.common.plugins.custom_fixtures', From c0d365057dd6e799e0e6b19334097fd95ce095fb Mon Sep 17 00:00:00 2001 From: andriyz-nv <80095786+andriyz-nv@users.noreply.github.com> Date: Tue, 10 Aug 2021 06:12:54 +0300 Subject: [PATCH 074/117] Fix test_vnet_vxlan 3k failing because arp_responder was not running, added sleep (#3642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What is the motivation for this PR? Fix for test_vnet_vxlan 3k when arp_responer was started but not fully functioning. Sleep ensure that during the packet was send it will be catched on VM side How did you do it? How did you verify/test it? 
FIxed issue that packet was send FromServer but not catched on FromVm side "stderr": "WARNING: No route found for IPv6 destination :: (no default route?)\n/usr/local/lib/python2.7/dist-packages/paramiko/transport.py:33: CryptographyDeprecationWarning: Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography, and will be removed in the next release.\n from cryptography.hazmat.backends import default_backend\nvnet_vxlan.VNET ... FAIL\n\n======================================================================\nFAIL: vnet_vxlan.VNET\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File "ptftests/vnet_vxlan.py", line 325, in runTest\n self.FromVM(test)\n File "ptftests/vnet_vxlan.py", line 398, in FromVM\n verify_packet(self, exp_pkt, test['port'])\n File "/usr/lib/python2.7/dist-packages/ptf/testutils.py", line 2405, in verify_packet\n % (device, port, result.format()))\nAssertionError: Expected packet was not received on device 0, port 2.\n========== EXPECTED ==========\ndst : DestMACField = '24:8a:07🆎9f:02' (None)\nsrc : SourceMACField = u'98:03:9b:f3:f5:80' (None)\ntype : XShortEnumField = 33024 (0)\n--\nprio : BitField = 0 (0)\nid : BitField = 0 (0)\nvlan : BitField = 3001 (1)\ntype : XShortEnumField = 2048 (0)\n--\nversion : BitField = 4 (4)\nihl : BitField = None (None)\ntos : XByteField = 0 (0)\nlen : ShortField = None (None)\nid : ShortField = 108 (1)\nflags : FlagsField = 0 (0)\nfrag : BitField = 0 (0)\nttl : ByteField = 63 (64)\nproto : ByteEnumField = 6 (0)\nchksum : XShortField = None (None)\nsrc : Emph = u'100.1.1.1' (None)\ndst : Emph = u'30.1.10.102' ('127.0.0.1')\noptions : PacketListField = [] ([])\n--\nsport : ShortEnumField = 1234 (20)\ndport : ShortEnumField = 5000 (80)\nseq : IntField = 0 (0)\nack : IntField = 0 (0)\ndataofs : BitField = None (None)\nreserved : BitField = 0 (0)\nflags : FlagsField = 2 (2)\nwindow : ShortField = 8192 
(8192)\nchksum : XShortField = None (None)\nurgptr : ShortField = 0 (0)\noptions : TCPOptionsField = {} ({})\n--\nload : StrField = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()+,-' ('')\n--\n0000 24 8A 07 AB 9F 02 98 03 9B F3 F5 80 81 00 0B B9 $...............\n0010 08 00 45 00 00 56 00 6C 00 00 3F 06 ED CD 64 01 ..E..V.l..?...d.\n0020 01 01 1E 01 0A 66 04 D2 13 88 00 00 00 00 00 00 .....f..........\n0030 00 00 50 02 20 00 ED DE 00 00 00 01 02 03 04 05 ..P. ...........\n0040 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 ................\n0050 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 .......... !"#$%\n0060 26 27 28 29 2A 2B 2C 2D &'()+,-\n========== RECEIVED ==========\n0 total packets.\n==============================\n\n\n----------------------------------------------------------------------\nRan 1 test in 3.027s\n\nFAILED (failures=1)", --- ansible/roles/test/files/ptftests/vnet_vxlan.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/ansible/roles/test/files/ptftests/vnet_vxlan.py b/ansible/roles/test/files/ptftests/vnet_vxlan.py index 88adb6fd43b..0c0ef66b36f 100644 --- a/ansible/roles/test/files/ptftests/vnet_vxlan.py +++ b/ansible/roles/test/files/ptftests/vnet_vxlan.py @@ -13,6 +13,7 @@ import os.path import json import ptf +import time import ptf.packet as scapy from ptf.base_tests import BaseTest from ptf import config @@ -300,11 +301,23 @@ def setUp(self): self.generate_ArpResponderConfig() self.cmd(["supervisorctl", "start", "arp_responder"]) - + self.check_arp_responder_running() self.dataplane.flush() return + def check_arp_responder_running(self): + """ + Check arp_responder is in RUNNING state, if not sleep 1 sec and check again + """ + for i in range(5): + output = self.cmd(["supervisorctl", "status", "arp_responder"]) + if 'RUNNING' in output[0]: + break + time.sleep(1) + else: + raise Exception("arp_responder state is 
not RUNNING! Output: %s" % output) + def tearDown(self): if self.vxlan_enabled: self.cmd(["supervisorctl", "stop", "arp_responder"]) From 9f8c9e4aa8e86722d1788fff07190501b0189dfe Mon Sep 17 00:00:00 2001 From: ppikh <70200079+ppikh@users.noreply.github.com> Date: Tue, 10 Aug 2021 06:15:03 +0300 Subject: [PATCH 075/117] [test_bgp_bbr] Fixed issue "ValueError: Circular reference detected" in case when test failed, removed unused dictionary which has had recursion (#3742) In case when test failed - it raise "ValueError: Circular reference detected". Because we have line which creates recursion: vm_route['tor_route'] = vm_route Key 'tor_route' from dictionary 'vm_route' not used in other places and does not make sense. What is the motivation for this PR? In case when test failed - it raise "ValueError: Circular reference detected". Because we have line which creates recursion: vm_route['tor_route'] = vm_route How did you do it? Removed unused part of code Signed-off-by: Petro Pikh --- tests/bgp/test_bgp_bbr.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/bgp/test_bgp_bbr.py b/tests/bgp/test_bgp_bbr.py index 4179d4ac95c..9e9050cfaef 100644 --- a/tests/bgp/test_bgp_bbr.py +++ b/tests/bgp/test_bgp_bbr.py @@ -248,7 +248,6 @@ def check_other_vms(nbrhosts, setup, route, accepted=True, node=None, results=No vm_route = nbrhosts[node]['host'].get_route(route.prefix) vm_route['failed'] = False vm_route['message'] = 'Checking route {} on {} passed'.format(str(route), node) - vm_route['tor_route'] = vm_route if accepted: if route.prefix not in vm_route['vrfs']['default']['bgpRouteEntries'].keys(): vm_route['failed'] = True From c62d1c58470ea364d89a362747355408e21f1d1a Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Tue, 10 Aug 2021 06:17:26 +0300 Subject: [PATCH 076/117] [drop_counters]: Adopt test to t0-56: handle VLAN IGMP flooding. (#3801) This is a short term solution to overcome t0-56 VLAN IGMP flooding, which significantly slows down PTF packet generator. 
As result, the RIF counters are incremented very slowly, which causes timeout. Signed-off-by: Nazarii Hnydyn --- tests/common/helpers/drop_counters/drop_counters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/helpers/drop_counters/drop_counters.py b/tests/common/helpers/drop_counters/drop_counters.py index 1f196b28241..11937028a6e 100644 --- a/tests/common/helpers/drop_counters/drop_counters.py +++ b/tests/common/helpers/drop_counters/drop_counters.py @@ -94,7 +94,7 @@ def get_drops_across_all_duthosts(): drop_list.append(int(get_pkt_drops(duthost, get_cnt_cli_cmd, asic_index)[dut_iface][column_key].replace(",", ""))) return drop_list check_drops_on_dut = lambda: packets_count in get_drops_across_all_duthosts() - if not wait_until(5, 1, check_drops_on_dut): + if not wait_until(25, 1, check_drops_on_dut): fail_msg = "'{}' drop counter was not incremented on iface {}. DUT {} == {}; Sent == {}".format( column_key, dut_iface, column_key, get_drops_across_all_duthosts(), packets_count ) From 046ad7f8bf66354b411c418fe6c77115ceef71db Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Mon, 9 Aug 2021 21:39:49 -0700 Subject: [PATCH 077/117] [ser test] refactoring Broadcom ser test and update TD3 parameters (#3991) What is the motivation for this PR? Broadcom SER test was failing on TD3 platform lately. It appears that SAI upgrade exposed/changed ser behavior. How did you do it? Refactor the ser test: Add more control parameters to test script ser_injector.py Categorize memory locations into: timeout, timeout_basic, slow_injection, unsupported. 'thorough' mode is new the learning mode, it will identify the memory entries that doesn't support SER test, also identify the entries that will take long time when injecting error. Avoid slow injection memory locations to improve test time. When avoiding these memory entries. thorough test time on TD3 reduced from 2-3 hours to 40 minutes. basic mode test is also consistently around 8-10 minutes. 
Update TD3 parameters. How did you verify/test it? Run test on TD3 platform: basic: 40 times, thorough (skipping slow injections): 10 times. --- .../broadcom/files/ser_injector.py | 629 ++++++++++-------- 1 file changed, 351 insertions(+), 278 deletions(-) diff --git a/tests/platform_tests/broadcom/files/ser_injector.py b/tests/platform_tests/broadcom/files/ser_injector.py index dca2377627b..68b7ef0f632 100644 --- a/tests/platform_tests/broadcom/files/ser_injector.py +++ b/tests/platform_tests/broadcom/files/ser_injector.py @@ -18,6 +18,18 @@ DEFAULT_SER_INJECTION_INTERVAL_SEC = 0.1 DEFAULT_SYSLOG_POLLING_INTERVAL_SEC = 0.1 +# Stop trying if stall has been detected for so many consecutive iterations +# Combined with the test duration below. If we don't make progress for so +# long, then we stop waiting. +DEFAULT_STALL_INDICATION = 15 +DEFAULT_SER_TEST_TIME_SEC = 60 +DEFAULT_BATCH_SIZE=10 +DEFAULT_THOROUGH_BATCH_SIZE=20 +DEFAULT_INJECTION_SLOW_SEC = 5 + +# Print verbose output for debugging +VERBOSE=False + """ Follow memory cannot be tested on corresponding platforms for reasons 1). The memory was reported to be corrected at a different location without name. 
@@ -32,280 +44,300 @@ and lspci signature in get_asic_name() function """ SKIP_MEMORY_PER_ASIC = { - 'td2' : [ - # cannot pass - u'L3_DEFIP_ALPM_IPV4.ipipe0', u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'L3_ENTRY_IPV6_UNICAST.ipipe0', - u'FP_GM_FIELDS.ipipe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', u'L3_DEFIP_ALPM_IPV6_64.ipipe0', - u'L3_DEFIP_ALPM_IPV6_128.ipipe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', u'MODPORT_MAP_MIRROR.ipipe0', - u'EGR_IP_TUNNEL_MPLS.epipe0', - # fail only with basic mode - u'EGR_IP_TUNNEL_IPV6.epipe0', u'EGR_DVP_ATTRIBUTE_1.epipe0', u'EGR_MPLS_VC_AND_SWAP_LABEL_TABLE.epipe0', - u'L3_TUNNEL_DATA_ONLY.ipipe0', + 'td2' : { + 'timeout' : [ + u'L3_DEFIP_ALPM_IPV4.ipipe0', u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'L3_ENTRY_IPV6_UNICAST.ipipe0', + u'FP_GM_FIELDS.ipipe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', u'L3_DEFIP_ALPM_IPV6_64.ipipe0', + u'L3_DEFIP_ALPM_IPV6_128.ipipe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', u'MODPORT_MAP_MIRROR.ipipe0', + u'EGR_IP_TUNNEL_MPLS.epipe0', + ], + 'timeout_basic' : [ + u'EGR_IP_TUNNEL_IPV6.epipe0', u'EGR_DVP_ATTRIBUTE_1.epipe0', u'EGR_MPLS_VC_AND_SWAP_LABEL_TABLE.epipe0', + u'L3_TUNNEL_DATA_ONLY.ipipe0', + ], + 'slow_injection' : [ + ], + 'unsupported' : [ + ] + }, + 'td3' : { + 'timeout' : [ + u'VLAN_SUBNET.ipipe0', u'EGR_ZONE_3_EDITOR_CONTROL_TCAM.epipe0', u'L3_ENTRY_QUAD.ipipe0', + u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', u'RH_HGT_FLOWSET_PIPE1.ipipe0', u'RH_ECMP_FLOWSET.ipipe0', + u'VLAN_XLATE_2_DOUBLE.ipipe0', u'IFP_TCAM_PIPE1.ipipe0', u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', + u'CPU_COS_MAP.ipipe0', u'L3_ENTRY_DOUBLE.ipipe0', u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', + u'EGR_ZONE_1_EDITOR_CONTROL_TCAM.epipe0', u'RH_ECMP_FLOWSET_PIPE0.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', + u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', + u'SRC_COMPRESSION_PIPE0.ipipe0', u'RH_LAG_FLOWSET.ipipe0', u'DST_COMPRESSION.ipipe0', + u'IP_PARSER2_MICE_TCAM_0.ipipe0', 
u'EGR_VLAN_XLATE_1_SINGLE.epipe0', u'IP_PARSER1_MICE_TCAM_0.ipipe0', + u'L3_DEFIP_ALPM_RAW.ipipe0', u'MPLS_ENTRY_DOUBLE.ipipe0', u'ING_VP_VLAN_MEMBERSHIP.ipipe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', + u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_4_B.mmu_xpe0', u'EGR_QOS_CTRL_TCAM.epipe0', + u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'MY_STATION_TCAM.ipipe0', u'PKT_FLOW_SELECT_TCAM_2.ipipe0', u'SUBPORT_ID_TO_SGPP_MAP.ipipe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', + u'ING_SNAT.ipipe0', u'EGR_ZONE_4_EDITOR_CONTROL_TCAM.epipe0', u'IFP_POLICY_TABLE_WIDE_PIPE0.ipipe0', + u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', u'L2_USER_ENTRY.ipipe0', u'MY_STATION_TCAM_2.ipipe0', + u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', u'EGR_ZONE_2_EDITOR_CONTROL_TCAM.epipe0', + u'L3_ENTRY_ONLY_SINGLE.ipipe0', u'EGR_VLAN_XLATE_2_SINGLE.epipe0', u'PHB_SELECT_TCAM.ipipe0', + u'SRC_COMPRESSION.ipipe0', u'EGR_PKT_FLOW_SELECT_TCAM.epipe0', u'MODPORT_MAP_MIRROR.ipipe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', + u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', u'IFP_LOGICAL_TABLE_SELECT.ipipe0', + u'PKT_FLOW_SELECT_TCAM_1.ipipe0', u'IFP_TCAM.ipipe0', u'RH_LAG_FLOWSET_PIPE0.ipipe0', + u'ING_DNAT_ADDRESS_TYPE.ipipe0', u'SRC_COMPRESSION_PIPE1.ipipe0', u'MODPORT_MAP_SUBPORT_MIRROR.ipipe0', + u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', u'PKT_FLOW_SELECT_TCAM_0.ipipe0', + u'IFP_POLICY_TABLE_WIDE_PIPE1.ipipe0', u'MMU_THDU_CONFIG_QUEUE1_PIPE1.mmu_xpe0', + u'DST_COMPRESSION_PIPE0.ipipe0', u'L3_DEFIP_PAIR_128.ipipe0', u'EGR_IP_TUNNEL_MPLS.epipe0', + u'L3_DEFIP.ipipe0', u'L2_ENTRY_ONLY_SINGLE.ipipe0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', + 
u'DST_COMPRESSION_PIPE1.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', + u'IP_PARSER1_MICE_TCAM_1.ipipe0', u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', + u'EGR_VP_VLAN_MEMBERSHIP.epipe0', u'L3_ENTRY_ONLY_DOUBLE.ipipe0', u'FLEX_RTAG7_HASH_TCAM.ipipe0', + u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', u'EGR_VLAN_XLATE_1_DOUBLE.epipe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', u'VLAN_XLATE_1_DOUBLE.ipipe0', + u'EGR_VLAN_XLATE_2_DOUBLE.epipe0', u'IFP_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', + u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', + u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', + u'RH_LAG_FLOWSET_PIPE1.ipipe0', u'MPLS_ENTRY_SINGLE.ipipe0', + u'EGR_FIELD_EXTRACTION_PROFILE_2_TCAM.epipe0', u'EGR_IP_TUNNEL_MPLS_DOUBLE_WIDE.epipe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sed0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'MMU_MTRO_CONFIG_L0_MEM_PIPE0.mmu_sed0', u'VLAN_XLATE_1_SINGLE.ipipe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'IFP_TCAM_PIPE0.ipipe0', u'RH_HGT_FLOWSET.ipipe0', + u'TCB_THRESHOLD_PROFILE_MAP_XPE0.mmu_xpe0', u'EXACT_MATCH_LOGICAL_TABLE_SELECT.ipipe0', + u'VLAN_XLATE_2_SINGLE.ipipe0', u'RH_ECMP_FLOWSET_PIPE1.ipipe0', + u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', u'EGR_FIELD_EXTRACTION_PROFILE_1_TCAM.epipe0', + u'L2_ENTRY.ipipe0', u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', u'L2_ENTRY_SINGLE.ipipe0', + u'RH_HGT_FLOWSET_PIPE0.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'IP_PARSER2_MICE_TCAM_1.ipipe0', + u'MMU_THDM_DB_QUEUE_CONFIG_A_PIPE1.mmu_xpe0', u'L3_ENTRY_SINGLE.ipipe0', u'IFP_POLICY_TABLE_WIDE.ipipe0', + u'L3_TUNNEL.ipipe0', u'EGR_IP_TUNNEL_IPV6.epipe0', u'EGR_VLAN.epipe0', + u'EGR_ZONE_1_DOT1P_MAPPING_TABLE_2.epipe0', u'EGR_ZONE_1_DOT1P_MAPPING_TABLE_3.epipe0', + u'EGR_ZONE_3_DOT1P_MAPPING_TABLE_4.epipe0', 
u'EGR_VLAN_CONTROL_3.epipe0', + u'EGR_ZONE_3_DOT1P_MAPPING_TABLE_2.epipe0', u'EGR_ZONE_1_DOT1P_MAPPING_TABLE_1.epipe0', + u'EGR_ZONE_1_DOT1P_MAPPING_TABLE_4.eABLE_4.epipe0', u'EGR_ZONE_3_DOT1P_MAPPING_TABLE_1.epipe0', + u'EGR_FLEX_CONTAINER_UPDATE_PROFILE_1.epipe0', u'EGR_ZONE_3_DOT1P_MAPPING_TABLE_3.epipe0', + u'EGR_VLAN_CONTROL_2.epipe0', u'EGR_ZONE_1_DOT1P_MAPPING_TABLE_4.epipe0', + ], + 'timeout_basic' : [ + u'EGR_ZONE_0_EDITOR_CONTROL_TCAM.epipe0', u'DLB_ECMP_FLOWSET_MEMBER.ipipe0', + u'DLB_ECMP_FLOWSET_MEMBER_PIPE0.ipipe0', u'INTFO_TC2PRI_MAPPING.mmu_glb0', + u'EGR_FLEX_CONTAINER_UPDATE_PROFILE_0.epipe0', + ], + 'slow_injection' : [ + u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', u'IFP_STORM_CONTROL_METERS.ipipe0', u'TDM_CALENDAR0_PIPE0.mmu_sc0', + u'EFP_METER_TABLE_PIPE1.epipe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_B_PIPE1.mmu_sed0', + u'IFP_METER_TABLE_PIPE1.ipipe0', u'MMU_WRED_CONFIG_PIPE0.mmu_xpe0', u'MMU_WRED_CONFIG_PIPE1.mmu_xpe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sed0', u'TDM_CALENDAR0_PIPE1.mmu_sc0', u'IFP_METER_TABLE.ipipe0', + u'L3_ENTRY_ONLY_QUAD.ipipe0', u'IFP_METER_TABLE_PIPE0.ipipe0', u'EFP_METER_TABLE.epipe0', + u'DLB_HGT_LAG_QUANTIZE_CONTROL.ipipe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE1.mmu_sed0', + u'EFP_METER_TABLE_PIPE0.epipe0', u'DLB_ECMP_QUANTIZE_CONTROL.ipipe0', + ], + 'unsupported' : [ + u'EGR_VLAN_XLATE_2_ECC.epipe0', u'IP_PARSER1_HME_STAGE_TCAM_NARROW_ONLY_0.ipipe0', + u'IP_PARSER0_HME_STAGE_TCAM_NARROW_ONLY_0.ipipe0', u'IP_PARSER1_HME_STAGE_TCAM_NARROW_ONLY_4.ipipe0', + u'IP_PARSER2_HME_STAGE_TCAM_NARROW_ONLY_0.ipipe0', u'L3_DEFIP_ALPM_HIT_ONLY.ipipe0', + u'L3_DEFIP_PAIR_128_HIT_ONLY.ipipe0', u'IP_PARSER1_HME_STAGE_TCAM_NARROW_ONLY_3.ipipe0', + u'IP_PARSER2_HME_STAGE_TCAM_NARROW_ONLY_1.ipipe0', u'L3_DEFIP_HIT_ONLY.ipipe0', u'L2_ENTRY_ECC.ipipe0', + u'IP_PARSER1_HME_STAGE_TCAM_NARROW_ONLY_1.ipipe0', u'EGR_VLAN_XLATE_1_ECC.epipe0', + u'IP_PARSER2_HME_STAGE_TCAM_NARROW_ONLY_4.ipipe0', u'L3_ENTRY_ECC.ipipe0', u'VLAN_XLATE_1_ECC.ipipe0', 
+ u'VLAN_XLATE_2_ECC.ipipe0', + ] + }, + 'th' : { + 'timeout' : [ + u'EGR_IP_TUNNEL_MPLS.epipe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', + u'MMU_THDU_RESUME_PORT_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE2.mmu_xpe0', u'MPLS_ENTRY_DOUBLE.ipipe0', + u'MMU_THDU_CONFIG_QUEUE_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', + u'IFP_TCAM_WIDE_PIPE2.ipipe0', u'FP_GM_FIELDS.ipipe0', u'FP_STORM_CONTROL_METERS.ipipe0', + u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', + u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE3.mmu_xpe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_THDU_CONFIG_QGROUP_PIPE3.mmu_xpe0', + u'EGR_IP_TUNNEL_IPV6.epipe0', u'MODPORT_MAP_MIRROR.ipipe0', u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', + u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE3.mmu_xpe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sc0', + u'L3_DEFIP_ALPM_IPV6_128.ipipe0', u'IFP_TCAM_WIDE_PIPE3.ipipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sc0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE2.mmu_xpe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE2.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE2.mmu_xpe0', u'VLAN_XLATE.ipipe0', + u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'VLAN_MAC.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', + u'MMU_THDU_RESUME_PORT_PIPE2.mmu_xpe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', + u'L3_DEFIP_ALPM_IPV4.ipipe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sc0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE1.ipipe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE2.mmu_xpe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', + 
u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', + u'THDI_PORT_SP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE3.mmu_xpe0', + u'MMU_THDU_OFFSET_QGROUP_PIPE2.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', + u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', + u'IFP_TCAM_WIDE_PIPE0.ipipe0', u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'MMU_THDU_OFFSET_QUEUE_PIPE2.mmu_xpe0', + u'IFP_TCAM.ipipe0', u'THDI_PORT_SP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE2.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sc0', + u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', u'EGR_VLAN_XLATE.epipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE3.mmu_xpe0', + u'L3_ENTRY_IPV4_UNICAST.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', + u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE3.mmu_xpe0', + u'MMU_THDU_CONFIG_PORT_PIPE2.mmu_xpe0', u'L2_ENTRY.ipipe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE2.mmu_xpe0', + u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', + u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC1.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', + u'L3_DEFIP_ALPM_IPV6_64.ipipe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', + u'L3_ENTRY_IPV6_UNICAST.ipipe0', + ], + 'timeout_basic' : [ + u'IFP_POLICY_TABLE_PIPE0.ipipe0', u'IFP_POLICY_TABLE.ipipe0', 
u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', + u'EXACT_MATCH_4_PIPE1.ipipe0', u'EXACT_MATCH_4_PIPE2.ipipe0', u'EXACT_MATCH_2_PIPE1.ipipe0', + u'EXACT_MATCH_4_PIPE0.ipipe0', u'EXACT_MATCH_2_PIPE2.ipipe0', u'EXACT_MATCH_2_PIPE0.ipipe0', + u'EXACT_MATCH_4_PIPE3.ipipe0', u'EXACT_MATCH_4.ipipe0', u'EXACT_MATCH_2_PIPE3.ipipe0', + u'EXACT_MATCH_2.ipipe0', u'ING_FLEX_CTR_OFFSET_TABLE_11.ipipe0', u'PORT_LAG_FAILOVER_SET.ipipe0', + u'VFP_POLICY_TABLE_PIPE2.ipipe0', u'Q_SCHED_L1_WEIGHT_MEM_PIPE2.mmu_sc0', u'SYSTEM_CONFIG_TABLE.ipipe0', + u'RTAG7_PORT_BASED_HASH.ipipe0', u'MMU_REPL_LIST_TBL_PIPE2.mmu_sc0', u'EGR_GPP_ATTRIBUTES.epipe0', + u'EGRESS_MASK.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', u'EMIRROR_CONTROL2.ipipe0', + u'VLAN_MPLS.ipipe0', u'ING_DVP_TABLE.ipipe0', u'EXACT_MATCH_QOS_ACTIONS_PROFILE.ipipe0', + u'MMU_REPL_GROUP_INFO_TBL_PIPE3.mmu_sc0', u'MMU_REPL_LIST_TBL_PIPE1.mmu_sc0', u'VFI_1.ipipe0', + u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', u'EFP_POLICY_TABLE_PIPE0.epipe0', u'EFP_POLICY_TABLE.epipe0', + u'IFP_STORM_CONTROL_METERS.ipipe0', ], - 'td3' : [ - # cannot pass - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sc0', u'ING_SNAT.ipipe0', - u'TCB_THRESHOLD_PROFILE_MAP_XPE2.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP1_PIPE2.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sc0', u'ING_VP_VLAN_MEMBERSHIP.ipipe0', - u'L3_ENTRY_IPV4_UNICAST.ipipe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', - u'THDI_PORT_SP_CONFIG_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', - u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE0.mmu_sed0', - u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', - u'MMU_THDU_Q_TO_QGRP_MAP_PIPE2.mmu_xpe0', u'FP_STORM_CONTROL_METERS.ipipe0', - u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', u'VLAN_XLATE.ipipe0', - u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', 
u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sed0', - u'MMU_THDM_DB_QUEUE_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE2.mmu_xpe0', - u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', - u'MMU_MTRO_CONFIG_L0_MEM_PIPE1.mmu_sed0', u'IFP_TCAM.ipipe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sed0', u'L3_TUNNEL.ipipe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', u'L2_ENTRY.ipipe0', u'L3_DEFIP_ALPM_IPV6_128.ipipe0', - u'FP_GLOBAL_MASK_TCAM.ipipe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE2.mmu_xpe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE3.mmu_xpe0', u'FP_GM_FIELDS.ipipe0', - u'TCB_THRESHOLD_PROFILE_MAP_XPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE2.mmu_xpe0', - u'EGR_IP_TUNNEL_IPV6.epipe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'MODPORT_MAP_MIRROR.ipipe0', - u'VLAN_MAC.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE2.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE2.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', - u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QGROUP_PIPE3.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', - u'INTFO_TC2PRI_MAPPING.mmu_glb0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE2.mmu_sed0', u'MPLS_ENTRY_DOUBLE.ipipe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sc0', u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_1_B.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE2.mmu_xpe0', u'EGR_IP_TUNNEL_MPLS.epipe0', u'L3_DEFIP_ALPM_IPV4.ipipe0', - u'THDI_PORT_SP_CONFIG_PIPE2.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', - u'MMU_THDU_Q_TO_QGRP_MAP_PIPE3.mmu_xpe0', u'TCB_THRESHOLD_PROFILE_MAP_XPE0.mmu_xpe0', - u'EGR_VP_VLAN_MEMBERSHIP.epipe0', u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', 
- u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', - u'EGR_VLAN_XLATE.epipe0', u'L3_DEFIP_ALPM_IPV6_64.ipipe0', u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', - u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'MMU_THDU_OFFSET_QGROUP_PIPE2.mmu_xpe0', - u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', - u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', u'L3_ENTRY_IPV6_UNICAST.ipipe0', u'ING_DNAT_ADDRESS_TYPE.ipipe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', - u'MMU_THDU_CONFIG_QUEUE_PIPE3.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', u'TCB_THRESHOLD_PROFILE_MAP_XPE3.mmu_xpe0', - u'MMU_THDU_RESUME_PORT_PIPE3.mmu_xpe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE3.mmu_xpe0', - u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC1.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE3.mmu_sed0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sc0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sed0', - u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_C_PIPE3.mmu_xpe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE2.mmu_xpe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sed0', u'IFP_TCAM_WIDE_PIPE3.ipipe0', - u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', - u'IFP_TCAM_WIDE_PIPE2.ipipe0', - # fail only with basic mode - u'MODPORT_MAP_SUBPORT_MIRROR.ipipe0', u'EGR_VLAN_XLATE_2_DOUBLE.epipe0', u'PKT_FLOW_SELECT_TCAM_2.ipipe0', - u'EGR_ZONE_3_EDITOR_CONTROL_TCAM.epipe0', u'RH_ECMP_FLOWSET_PIPE0.ipipe0', 
u'EGR_VLAN_XLATE_1_DOUBLE.epipe0', - u'RH_ECMP_FLOWSET.ipipe0', u'DST_COMPRESSION_PIPE1.ipipe0', u'EGR_FIELD_EXTRACTION_PROFILE_2_TCAM.epipe0', - u'VLAN_XLATE_2_DOUBLE.ipipe0', u'L3_DEFIP.ipipe0', u'EGR_PKT_FLOW_SELECT_TCAM.epipe0', - u'FLEX_RTAG7_HASH_TCAM.ipipe0', u'L2_ENTRY_SINGLE.ipipe0', u'VLAN_XLATE_1_SINGLE.ipipe0', - u'EGR_FIELD_EXTRACTION_PROFILE_1_TCAM.epipe0', u'EXACT_MATCH_LOGICAL_TABLE_SELECT.ipipe0', - u'SRC_COMPRESSION.ipipe0', u'EGR_VLAN_XLATE_1_SINGLE.epipe0', u'RH_HGT_FLOWSET_PIPE0.ipipe0', - u'VLAN_SUBNET.ipipe0', u'RH_LAG_FLOWSET_PIPE1.ipipe0', u'MY_STATION_TCAM_2.ipipe0', - u'EGR_ZONE_4_EDITOR_CONTROL_TCAM.epipe0', u'RH_LAG_FLOWSET.ipipe0', u'RH_HGT_FLOWSET_PIPE1.ipipe0', - u'L3_DEFIP_PAIR_128.ipipe0', u'L3_ENTRY_ONLY_SINGLE.ipipe0', u'MPLS_ENTRY_SINGLE.ipipe0', - u'EGR_ZONE_2_EDITOR_CONTROL_TCAM.epipe0', u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', - u'IFP_TCAM_PIPE1.ipipe0', u'L3_ENTRY_QUAD.ipipe0', u'RH_ECMP_FLOWSET_PIPE1.ipipe0', - u'VLAN_XLATE_2_SINGLE.ipipe0', u'L2_ENTRY_ONLY_SINGLE.ipipe0', u'EGR_ZONE_1_EDITOR_CONTROL_TCAM.epipe0', - u'SRC_COMPRESSION_PIPE0.ipipe0', u'L3_ENTRY_ONLY_DOUBLE.ipipe0', u'SRC_COMPRESSION_PIPE1.ipipe0', - u'SUBPORT_ID_TO_SGPP_MAP.ipipe0', u'PKT_FLOW_SELECT_TCAM_0.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', u'L3_ENTRY_SINGLE.ipipe0', u'DST_COMPRESSION.ipipe0', - u'L2_USER_ENTRY.ipipe0', u'L3_ENTRY_ONLY_QUAD.ipipe0', u'PKT_FLOW_SELECT_TCAM_1.ipipe0', - u'IP_PARSER1_MICE_TCAM_0.ipipe0', u'PHB_SELECT_TCAM.ipipe0', u'MY_STATION_TCAM.ipipe0', - u'CPU_COS_MAP.ipipe0', u'L3_DEFIP_ALPM_RAW.ipipe0', u'DST_COMPRESSION_PIPE0.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT.ipipe0', u'IFP_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', u'VLAN_XLATE_1_DOUBLE.ipipe0', - u'RH_HGT_FLOWSET.ipipe0', u'IFP_TCAM_PIPE0.ipipe0', u'IP_PARSER1_MICE_TCAM_1.ipipe0', - u'EGR_IP_TUNNEL_MPLS_DOUBLE_WIDE.epipe0', u'IFP_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', u'L3_ENTRY_DOUBLE.ipipe0', - u'IP_PARSER2_MICE_TCAM_1.ipipe0', 
u'IP_PARSER2_MICE_TCAM_0.ipipe0', u'RH_LAG_FLOWSET_PIPE0.ipipe0', - u'EGR_VLAN_XLATE_2_SINGLE.epipe0', u'EGR_QOS_CTRL_TCAM.epipe0', u'EGR_ZONE_0_EDITOR_CONTROL_TCAM.epipe0', - # fail randomly with basic mode - u'IFP_POLICY_TABLE_WIDE_PIPE0.ipipe0', u'IFP_POLICY_TABLE_WIDE.ipipe0', u'IFP_POLICY_TABLE_WIDE_PIPE1.ipipe0', - u'IFP_METER_TABLE_PIPE1.ipipe0', u'IFP_METER_TABLE.ipipe0', + 'slow_injection' : [ ], - 'th' : [ - # cannot pass - u'EGR_IP_TUNNEL_MPLS.epipe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', - u'MMU_THDU_RESUME_PORT_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE2.mmu_xpe0', u'MPLS_ENTRY_DOUBLE.ipipe0', - u'MMU_THDU_CONFIG_QUEUE_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', - u'IFP_TCAM_WIDE_PIPE2.ipipe0', u'FP_GM_FIELDS.ipipe0', u'FP_STORM_CONTROL_METERS.ipipe0', - u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', - u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE3.mmu_xpe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_THDU_CONFIG_QGROUP_PIPE3.mmu_xpe0', - u'EGR_IP_TUNNEL_IPV6.epipe0', u'MODPORT_MAP_MIRROR.ipipe0', u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', - u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE3.mmu_xpe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sc0', - u'L3_DEFIP_ALPM_IPV6_128.ipipe0', u'IFP_TCAM_WIDE_PIPE3.ipipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sc0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE2.mmu_xpe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE2.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE2.mmu_xpe0', u'VLAN_XLATE.ipipe0', - u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'VLAN_MAC.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', - u'MMU_THDU_RESUME_PORT_PIPE2.mmu_xpe0', 
u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', - u'L3_DEFIP_ALPM_IPV4.ipipe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sc0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE1.ipipe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE2.mmu_xpe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', - u'THDI_PORT_SP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE3.mmu_xpe0', - u'MMU_THDU_OFFSET_QGROUP_PIPE2.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', - u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', - u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', - u'IFP_TCAM_WIDE_PIPE0.ipipe0', u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'MMU_THDU_OFFSET_QUEUE_PIPE2.mmu_xpe0', - u'IFP_TCAM.ipipe0', u'THDI_PORT_SP_CONFIG_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE2.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE2.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sc0', - u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', u'EGR_VLAN_XLATE.epipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE3.mmu_xpe0', - u'L3_ENTRY_IPV4_UNICAST.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', - u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE3.mmu_xpe0', - u'MMU_THDU_CONFIG_PORT_PIPE2.mmu_xpe0', u'L2_ENTRY.ipipe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE2.mmu_xpe0', - u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', - u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC1.mmu_xpe0', - 
u'MMU_THDM_DB_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', - u'L3_DEFIP_ALPM_IPV6_64.ipipe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', - u'L3_ENTRY_IPV6_UNICAST.ipipe0', - # fail randomly with basic mode - u'IFP_POLICY_TABLE_PIPE0.ipipe0', u'IFP_POLICY_TABLE.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', - u'EXACT_MATCH_4_PIPE1.ipipe0', u'EXACT_MATCH_4_PIPE2.ipipe0', u'EXACT_MATCH_2_PIPE1.ipipe0', - u'EXACT_MATCH_4_PIPE0.ipipe0', u'EXACT_MATCH_2_PIPE2.ipipe0', u'EXACT_MATCH_2_PIPE0.ipipe0', - u'EXACT_MATCH_4_PIPE3.ipipe0', u'EXACT_MATCH_4.ipipe0', u'EXACT_MATCH_2_PIPE3.ipipe0', - u'EXACT_MATCH_2.ipipe0', u'ING_FLEX_CTR_OFFSET_TABLE_11.ipipe0', u'PORT_LAG_FAILOVER_SET.ipipe0', - u'VFP_POLICY_TABLE_PIPE2.ipipe0', u'Q_SCHED_L1_WEIGHT_MEM_PIPE2.mmu_sc0', u'SYSTEM_CONFIG_TABLE.ipipe0', - u'RTAG7_PORT_BASED_HASH.ipipe0', u'MMU_REPL_LIST_TBL_PIPE2.mmu_sc0', u'EGR_GPP_ATTRIBUTES.epipe0', - u'EGRESS_MASK.ipipe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', u'EMIRROR_CONTROL2.ipipe0', - u'VLAN_MPLS.ipipe0', u'ING_DVP_TABLE.ipipe0', u'EXACT_MATCH_QOS_ACTIONS_PROFILE.ipipe0', - u'MMU_REPL_GROUP_INFO_TBL_PIPE3.mmu_sc0', u'MMU_REPL_LIST_TBL_PIPE1.mmu_sc0', u'VFI_1.ipipe0', - u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', u'EFP_POLICY_TABLE_PIPE0.epipe0', u'EFP_POLICY_TABLE.epipe0', - u'IFP_STORM_CONTROL_METERS.ipipe0', + 'unsupported' : [ + ] + }, + 'th2' : { + 'timeout' : [ + u'TCB_THRESHOLD_PROFILE_MAP_XPE3.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE3.mmu_xpe0', u'VLAN_MAC.ipipe0', u'EGR_VP_VLAN_MEMBERSHIP.epipe0', + u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE3.mmu_xpe0', + u'MMU_THDU_RESUME_PORT_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE2.mmu_xpe0', + u'TCB_THRESHOLD_PROFILE_MAP_XPE1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sed0', + u'MMU_THDU_OFFSET_QGROUP_PIPE2.mmu_xpe0', 
u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE2.mmu_sed0', + u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sed0', u'VLAN_XLATE.ipipe0', + u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', + u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', + u'INTFO_TC2PRI_MAPPING.mmu_glb0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', + u'MMU_MTRO_CONFIG_L0_MEM_PIPE3.mmu_sed0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', + u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC1.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', + u'IFP_TCAM_WIDE_PIPE0.ipipe0', u'L3_DEFIP_ALPM_IPV6_64.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE2.ipipe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sed0', u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', + u'THDI_PORT_SP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE2.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_C_PIPE3.mmu_xpe0', + u'L3_TUNNEL.ipipe0', u'MPLS_ENTRY_DOUBLE.ipipe0', u'ING_DNAT_ADDRESS_TYPE.ipipe0', + u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE2.mmu_xpe0', + u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'L3_DEFIP_ALPM_IPV4.ipipe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE0.mmu_sed0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE3.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE2.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE1.ipipe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE2.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE3.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', u'L3_ENTRY_IPV4_UNICAST.ipipe0', u'IFP_TCAM_WIDE_PIPE3.ipipe0', + u'MMU_THDU_CONFIG_QGROUP_PIPE3.mmu_xpe0', 
u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE2.mmu_xpe0', + u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sed0', u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', + u'MMU_THDU_CONFIG_QUEUE_PIPE3.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE2.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE2.mmu_xpe0', + u'TCB_THRESHOLD_PROFILE_MAP_XPE0.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', + u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE2.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'L3_DEFIP_ALPM_IPV6_128.ipipe0', + u'MMU_THDM_DB_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'MODPORT_MAP_MIRROR.ipipe0', u'IFP_TCAM.ipipe0', + u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'EGR_VLAN_XLATE.epipe0', u'FP_GM_FIELDS.ipipe0', + u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE1.mmu_sed0', + u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', u'TCB_THRESHOLD_PROFILE_MAP_XPE2.mmu_xpe0', + u'FP_STORM_CONTROL_METERS.ipipe0', u'L2_ENTRY.ipipe0', u'EGR_IP_TUNNEL_IPV6.epipe0', + u'MMU_THDU_OFFSET_QUEUE_PIPE3.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE2.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE2.mmu_xpe0', + u'L3_ENTRY_IPV6_UNICAST.ipipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', + u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE2.mmu_xpe0', + u'EGR_IP_TUNNEL_MPLS.epipe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE3.mmu_xpe0', + u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', u'ING_SNAT.ipipe0', + u'MMU_THDM_MCQE_QUEUE_OFFSET_B_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0', + u'ING_VP_VLAN_MEMBERSHIP.ipipe0', u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', + ], + 
'timeout_basic' : [ + ], + 'slow_injection' : [ + ], + 'unsupported' : [ + ] + }, + 'th3' : { + 'timeout' : [ + u'L3_DEFIP_TCAM_LEVEL1.ipipe0', + u'MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE7.mmu_eb0', + u'L3_ENTRY_ONLY_SINGLE.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE6.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE3.mmu_eb0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', + u'L3_ENTRY_SINGLE.ipipe0', + u'L2_ENTRY.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE6.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', + u'L3_DEFIP_ALPM_LEVEL3.ipipe0', + u'L3_ENTRY_DOUBLE.ipipe0', + u'L3_TUNNEL_QUAD.ipipe0', + u'L3_DEFIP_PAIR_LEVEL1.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE3.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', + u'L3_ENTRY_ONLY_DOUBLE.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE0.mmu_eb0', + u'L3_DEFIP_ALPM_LEVEL2.ipipe0', + u'EGR_IP_TUNNEL_IPV6.epipe0', + u'EXACT_MATCH_ECC.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE3.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE5.ipipe0', + u'L3_DEFIP_ALPM_LEVEL3_SINGLE.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE5.mmu_eb0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE2.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE2.ipipe0', + u'L3_ENTRY_QUAD.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', + u'EGR_IP_TUNNEL_MPLS.epipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE5.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE4.mmu_eb0', + u'L2_USER_ENTRY.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE6.mmu_eb0', + u'MY_STATION_TCAM.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE4.ipipe0', + u'L3_DEFIP_LEVEL1.ipipe0' , + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE2.mmu_eb0', + u'L3_DEFIP_ALPM_LEVEL2_SINGLE.ipipe0', + u'L3_TUNNEL_DOUBLE.ipipe0', + u'L3_ENTRY_ONLY_QUAD.ipipe0', + u'IFP_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', + u'MMU_QSCH_L2_WEIGHT_MEM_PIPE1.mmu_eb0', + u'MPLS_ENTRY_SINGLE.ipipe0', + 
u'CPU_COS_MAP.ipipe0', + u'L3_TUNNEL_SINGLE.ipipe0', + u'L3_DEFIP_ALPM_LEVEL2_HIT_ONLY.ipipe0', + u'L2_ENTRY_ONLY_SINGLE.ipipe0', + u'L3_DEFIP_LEVEL1_HIT_ONLY.ipipe0', + u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE4.ipipe0', + u'L3_DEFIP_ALPM_LEVEL3_HIT_ONLY.ipipe0' ], - 'th2' : [ - # cannot pass - u'TCB_THRESHOLD_PROFILE_MAP_XPE3.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE0.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE3.mmu_xpe0', u'VLAN_MAC.ipipe0', u'EGR_VP_VLAN_MEMBERSHIP.epipe0', - u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE3.mmu_xpe0', - u'MMU_THDU_RESUME_PORT_PIPE3.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE2.mmu_xpe0', - u'TCB_THRESHOLD_PROFILE_MAP_XPE1.mmu_xpe0', u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE2.mmu_sed0', - u'MMU_THDU_OFFSET_QGROUP_PIPE2.mmu_xpe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE0.mmu_xpe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE0.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE2.mmu_sed0', - u'L3_ENTRY_IPV6_MULTICAST.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_0.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE0.mmu_sed0', u'VLAN_XLATE.ipipe0', - u'MMU_THDU_RESUME_PORT_PIPE1.mmu_xpe0', u'L3_ENTRY_IPV4_MULTICAST.ipipe0', - u'MMU_THDU_OFFSET_QUEUE_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE0.mmu_xpe0', - u'INTFO_TC2PRI_MAPPING.mmu_glb0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE1.mmu_xpe0', - u'MMU_MTRO_CONFIG_L0_MEM_PIPE3.mmu_sed0', u'MMU_THDU_OFFSET_QGROUP_PIPE0.mmu_xpe0', - u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC1.mmu_xpe0', u'MMU_THDU_CONFIG_QUEUE_PIPE0.mmu_xpe0', - u'IFP_TCAM_WIDE_PIPE0.ipipe0', u'L3_DEFIP_ALPM_IPV6_64.ipipe0', u'MMU_WRED_DROP_CURVE_PROFILE_6.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_5.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE2.ipipe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE3.mmu_sed0', u'MMU_THDU_CONFIG_QGROUP_PIPE1.mmu_xpe0', - u'THDI_PORT_SP_CONFIG_PIPE3.mmu_xpe0', u'MMU_THDU_RESUME_PORT_PIPE2.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_C_PIPE3.mmu_xpe0', - u'L3_TUNNEL.ipipe0', 
u'MPLS_ENTRY_DOUBLE.ipipe0', u'ING_DNAT_ADDRESS_TYPE.ipipe0', - u'THDI_PORT_SP_CONFIG_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE2.mmu_xpe0', - u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE3.mmu_xpe0', u'L3_DEFIP_ALPM_IPV4.ipipe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE0.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE0.mmu_sed0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE3.mmu_xpe0', u'MMU_THDM_DB_PORTSP_CONFIG_PIPE2.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_3.mmu_xpe0', u'IFP_TCAM_WIDE_PIPE1.ipipe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE2.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE3.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE1.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_7.mmu_xpe0', u'L3_ENTRY_IPV4_UNICAST.ipipe0', u'IFP_TCAM_WIDE_PIPE3.ipipe0', - u'MMU_THDU_CONFIG_QGROUP_PIPE3.mmu_xpe0', u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE2.mmu_xpe0', - u'MMU_MTRO_EGRMETERINGCONFIG_MEM_PIPE1.mmu_sed0', u'MMU_REPL_GROUP_INITIAL_COPY_COUNT_SC0.mmu_xpe0', - u'MMU_THDU_CONFIG_QUEUE_PIPE3.mmu_xpe0', u'THDI_PORT_SP_CONFIG_PIPE2.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_8.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE2.mmu_xpe0', - u'TCB_THRESHOLD_PROFILE_MAP_XPE0.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE0.mmu_xpe0', - u'MMU_THDU_OFFSET_QUEUE_PIPE1.mmu_xpe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE2.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE1.mmu_xpe0', u'L3_DEFIP_ALPM_IPV6_128.ipipe0', - u'MMU_THDM_DB_QUEUE_CONFIG_PIPE3.mmu_xpe0', u'MODPORT_MAP_MIRROR.ipipe0', u'IFP_TCAM.ipipe0', - u'MMU_THDM_DB_PORTSP_CONFIG_PIPE1.mmu_xpe0', u'EGR_VLAN_XLATE.epipe0', u'FP_GM_FIELDS.ipipe0', - u'MMU_THDU_CONFIG_QUEUE_PIPE1.mmu_xpe0', u'MMU_MTRO_CONFIG_L0_MEM_PIPE1.mmu_sed0', - u'THDI_PORT_SP_CONFIG_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE1.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE0.mmu_xpe0', u'MMU_THDM_MCQE_PORTSP_CONFIG_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_4.mmu_xpe0', u'TCB_THRESHOLD_PROFILE_MAP_XPE2.mmu_xpe0', - u'FP_STORM_CONTROL_METERS.ipipe0', u'L2_ENTRY.ipipe0', 
u'EGR_IP_TUNNEL_IPV6.epipe0', - u'MMU_THDU_OFFSET_QUEUE_PIPE3.mmu_xpe0', u'MMU_THDU_OFFSET_QUEUE_PIPE2.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE2.mmu_xpe0', u'MMU_THDU_CONFIG_PORT_PIPE2.mmu_xpe0', - u'L3_ENTRY_IPV6_UNICAST.ipipe0', u'MMU_THDU_Q_TO_QGRP_MAP_PIPE0.mmu_xpe0', - u'MMU_WRED_DROP_CURVE_PROFILE_1.mmu_xpe0', u'MMU_THDM_DB_QUEUE_CONFIG_PIPE2.mmu_xpe0', - u'EGR_IP_TUNNEL_MPLS.epipe0', u'MMU_THDM_MCQE_QUEUE_CONFIG_PIPE3.mmu_xpe0', - u'MMU_THDM_DB_QUEUE_OFFSET_0_PIPE1.mmu_xpe0', u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE1.mmu_xpe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', u'ING_SNAT.ipipe0', - u'MMU_THDM_MCQE_QUEUE_OFFSET_B_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0', - u'ING_VP_VLAN_MEMBERSHIP.ipipe0', u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', u'FP_GLOBAL_MASK_TCAM.ipipe0', + 'timeout_basic' : [ ], - 'th3' : [ - # cannot pass - u'L3_DEFIP_TCAM_LEVEL1.ipipe0', - u'MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE7.mmu_eb0', - u'L3_ENTRY_ONLY_SINGLE.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE6.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE3.mmu_eb0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', - u'L3_ENTRY_SINGLE.ipipe0', - u'L2_ENTRY.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE6.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE0.ipipe0', - u'L3_DEFIP_ALPM_LEVEL3.ipipe0', - u'L3_ENTRY_DOUBLE.ipipe0', - u'L3_TUNNEL_QUAD.ipipe0', - u'L3_DEFIP_PAIR_LEVEL1.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE3.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', - u'L3_ENTRY_ONLY_DOUBLE.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE0.mmu_eb0', - u'L3_DEFIP_ALPM_LEVEL2.ipipe0', - u'EGR_IP_TUNNEL_IPV6.epipe0', - u'EXACT_MATCH_ECC.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE3.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE5.ipipe0', - u'L3_DEFIP_ALPM_LEVEL3_SINGLE.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT.ipipe0', - 
u'MMU_QSCH_L2_WEIGHT_MEM_PIPE5.mmu_eb0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE2.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE2.ipipe0', - u'L3_ENTRY_QUAD.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE1.ipipe0', - u'EGR_IP_TUNNEL_MPLS.epipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE5.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE4.mmu_eb0', - u'L2_USER_ENTRY.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE6.mmu_eb0', - u'MY_STATION_TCAM.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE4.ipipe0', - u'L3_DEFIP_LEVEL1.ipipe0' , - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE2.mmu_eb0', - u'L3_DEFIP_ALPM_LEVEL2_SINGLE.ipipe0', - u'L3_TUNNEL_DOUBLE.ipipe0', - u'L3_ENTRY_ONLY_QUAD.ipipe0', - u'IFP_LOGICAL_TABLE_SELECT_PIPE7.ipipe0', - u'MMU_QSCH_L2_WEIGHT_MEM_PIPE1.mmu_eb0', - u'MPLS_ENTRY_SINGLE.ipipe0', - u'CPU_COS_MAP.ipipe0', - u'L3_TUNNEL_SINGLE.ipipe0', - u'L3_DEFIP_ALPM_LEVEL2_HIT_ONLY.ipipe0', - u'L2_ENTRY_ONLY_SINGLE.ipipe0', - u'L3_DEFIP_LEVEL1_HIT_ONLY.ipipe0', - u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE4.ipipe0', - u'L3_DEFIP_ALPM_LEVEL3_HIT_ONLY.ipipe0' + 'slow_injection' : [ + ], + 'unsupported' : [ ] + } } -# Stop trying if stall has been detected for so many consecutive iterations -# Combined with the test duration below. If we don't make progress for so -# long, then we stop waiting. 
-DEFAULT_STALL_INDICATION = 15 -DEFAULT_SER_TEST_TIME_SEC = 60 -DEFAULT_BATCH_SIZE=10 - -# Print verbose output for debugging -VERBOSE=False def run_cmd(cmd): ''' @@ -483,7 +515,10 @@ class SerTest(object): def __init__(self, test_time_sec = DEFAULT_SER_TEST_TIME_SEC, ser_injection_interval_sec = DEFAULT_SER_INJECTION_INTERVAL_SEC, syslog_poll_interval_sec = DEFAULT_SYSLOG_POLLING_INTERVAL_SEC, - stall_indication = DEFAULT_STALL_INDICATION): + stall_indication = DEFAULT_STALL_INDICATION, + batch_size = DEFAULT_BATCH_SIZE, + injection_slow_sec = DEFAULT_INJECTION_SLOW_SEC, + skip_slow_injections = False): ''' @summary: Class constructor ''' @@ -491,11 +526,15 @@ def __init__(self, test_time_sec = DEFAULT_SER_TEST_TIME_SEC, self.test_time_sec = test_time_sec self.ser_injection_interval_sec = ser_injection_interval_sec self.stall_indication = stall_indication + self.batch_size = batch_size + self.injection_slow_sec = injection_slow_sec + self.skip_slow_injections = skip_slow_injections self.test_candidates = [] self.mem_verification_pending = [] self.mem_verified = {} self.mem_failed = {} self.mem_ser_unsupported = [] + self.mem_injection_speed = {} self.miss_counts = {} self.bcmMemory = BcmMemory() @@ -509,27 +548,38 @@ def test_memory(self, completeness='basic'): global SRAM_SCAN_INTERVAL_USEC global SRAM_SCAN_ENTRIES - skip_list = [] + full_skip_list = get_skip_list_per_asic() self.bcmMemory.read_memory() if completeness == 'thorough': self.test_candidates = list(set(self.bcmMemory.get_cached_memory().keys())) + if self.batch_size == DEFAULT_BATCH_SIZE: + # Slightly increase batch size to reduce run time + self.batch_size = DEFAULT_THOROUGH_BATCH_SIZE + skip_list = [] elif completeness == 'diagnose': # Re-probing the normally skipped entries - self.test_candidates = get_skip_list_per_asic() + self.test_candidates = list(set(full_skip_list['timeout'] + full_skip_list['timeout_basic'] + + full_skip_list['unsupported'] + full_skip_list['slow_injection'])) else: - 
skip_list = get_skip_list_per_asic() + skip_list = list(set(full_skip_list['timeout'] + full_skip_list['unsupported'] + full_skip_list['slow_injection'])) + if completeness != 'confident': + skip_list = list(set(skip_list + full_skip_list['timeout_basic'])) self.test_candidates = list(set(self.bcmMemory.get_cached_memory().keys()) - set(skip_list)) + if self.skip_slow_injections: + self.test_candidates = list(set(self.test_candidates) - set(full_skip_list['slow_injection'])) + skip_list = list(set(skip_list + full_skip_list['slow_injection'])) + if completeness == 'debug': batch_size = min(1, len(self.test_candidates)) self.mem_verification_pending = random.sample(self.test_candidates, batch_size) elif completeness == 'basic': - batch_size = min(DEFAULT_BATCH_SIZE, len(self.test_candidates)) + batch_size = min(self.batch_size, len(self.test_candidates)) sample_size = min(batch_size * 6, len(self.test_candidates)) self.mem_verification_pending = random.sample(self.test_candidates, sample_size) - else: - batch_size = min(DEFAULT_BATCH_SIZE, len(self.test_candidates)) + else: # default: 'confident', 'thorough' + batch_size = min(self.batch_size, len(self.test_candidates)) # Still go through random to ramdomize the ordering self.mem_verification_pending = random.sample(self.test_candidates, len(self.test_candidates)) @@ -553,9 +603,9 @@ def test_memory(self, completeness='basic'): # all remaining memory will be tested in each iteration. 
while (len(self.mem_verification_pending) > 0): count += 1 - print("Test iteration {}, stalled {}, candidate(s) left {}".format(count, stall, len(self.mem_verification_pending))) size_before = len(self.mem_verification_pending) batch_size = min(batch_size, size_before) + print("Test iteration {}, stalled {}, candidate(s) left {} batch_size {}".format(count, stall, size_before, batch_size)) test_memory = list(self.mem_verification_pending[0:batch_size]) self.run_test(test_memory) size_after = len(self.mem_verification_pending) @@ -565,7 +615,7 @@ def test_memory(self, completeness='basic'): stall = 0 else: stall = stall + 1 - batch_size = min(len(self.mem_verification_pending), batch_size + DEFAULT_BATCH_SIZE) # Increase batch size when stall is detected + batch_size = min(len(self.mem_verification_pending), batch_size + self.batch_size) # Increase batch size when stall is detected if stall >= self.stall_indication: if VERBOSE: print('--- stall detected. Stop testing') @@ -579,9 +629,11 @@ def test_memory(self, completeness='basic'): else: print("SER Test memories candidates (%s)" % (len(self.test_candidates))) print("SER Test succeeded for memories (%s)" % (len(self.mem_verified))) - print("SER Test failed for memories (%s): %s" % (len(self.mem_failed), self.mem_failed)) + print("SER Test failed for memories (%s): %s %s" % (len(self.mem_failed), self.mem_failed, self.mem_failed.keys())) print("SER Test timed out for memories (%s): %s" % (len(self.mem_verification_pending), self.mem_verification_pending)) print("SER Test is not supported for memories (%s): %s" % (len(self.mem_ser_unsupported), self.mem_ser_unsupported)) + slow_injection = { k : v for k, v in self.mem_injection_speed.items() if v['slow'] > 0 } + print("SER Test memory error injection too slow (%s): %s %s" % (len(slow_injection), slow_injection, slow_injection.keys())) if VERBOSE: print("--- found {} memory location(s) reported misaligned correction events ---".format(len(self.miss_counts))) @@ -707,7 
+759,18 @@ def run_test(self, memory, entry = 0): idx = idx + 1 tag = '{} / {}'.format(idx, cnt) self.mem_verification_pending.remove(mem) + inj_start_time = time.time() stdout, stderr = self.inject_ser(mem, tag = tag) + inj_time = time.time() - inj_start_time + speed = self.mem_injection_speed.get(mem, {'slow' : 0, 'fast' : 0, 'slow_times' : []}) + if inj_time < self.injection_slow_sec: + speed['fast'] = speed['fast'] + 1 + else: + speed['slow'] = speed['slow'] + 1 + speed['slow_times'].append(inj_time) + if VERBOSE: + print('--- mem {} error inject is slow: {}'.format(mem, speed)) + self.mem_injection_speed[mem] = speed if stdout.find('SER correction for it is not currently supported') > -1: print("memory %s does not support ser" % mem) self.mem_ser_unsupported.append(mem) @@ -733,16 +796,26 @@ def main(): global VERBOSE parser = argparse.ArgumentParser(description='Completeness level') + parser.add_argument('-b', '--batch_size', help='batch size: number of entries to inject at each batch, default {}'.format(DEFAULT_BATCH_SIZE), + type=int, required=False, default=DEFAULT_BATCH_SIZE) parser.add_argument('-c', '--completeness', help='Completeness level: debug, basic, confident, thorough, diagnose', type=str, required=False, default='basic', choices=['debug', 'basic', 'confident', 'thorough', 'diagnose']) + parser.add_argument('-e', '--skip_slow_injections', help='Skip slow injections, default False', action='store_true', required=False, default=False) + parser.add_argument('-i', '--injection_slow_sec', help='injection slow threshold in secs: stall count when stopping test, default {}'.format(DEFAULT_INJECTION_SLOW_SEC), + type=int, required=False, default=DEFAULT_INJECTION_SLOW_SEC) + parser.add_argument('-s', '--stall_limit', help='Stall limit: stall count when stopping test, default {}'.format(DEFAULT_STALL_INDICATION), + type=int, required=False, default=DEFAULT_STALL_INDICATION) + parser.add_argument('-t', '--test_batch_timeout', help='test batch timeout: max 
wait time for each batch (in seconds), default {}'.format(DEFAULT_SER_TEST_TIME_SEC), + type=int, required=False, default=DEFAULT_SER_TEST_TIME_SEC) parser.add_argument('-v', '--verbose', help='Set verbose output', action='store_true', required=False, default=False) args = parser.parse_args() VERBOSE = args.verbose start_time = time.time() - serTest = SerTest() + serTest = SerTest(test_time_sec=args.test_batch_timeout, stall_indication=args.stall_limit, batch_size=args.batch_size, + injection_slow_sec = args.injection_slow_sec, skip_slow_injections=args.skip_slow_injections) rc = serTest.test_memory(args.completeness) print("--- %s seconds, rc %d ---" % ((time.time() - start_time), rc)) sys.exit(rc) From b76fdc1c0a339cdb60e448d80d81669ca3cd3503 Mon Sep 17 00:00:00 2001 From: rawal01 <65668547+rawal01@users.noreply.github.com> Date: Tue, 10 Aug 2021 14:25:27 -0400 Subject: [PATCH 078/117] Add logic to skip interfaces that do not have tranceivers in tests under platform_tests/sfp (#3994) What is the motivation for this PR? Right now the tests under platform_tests/sfp/ folder do not have logic to skip ports that do not have transceivers them based on hwsku.json I am adding same xcvr_skip_list logic which is used in other sfp related tests in platform_tests/sfp/test_sfpshow.py platform_tests/sfp/test_sfputil.py platform_tests/sfp/test_show_intff_xcvr.py How did you do it? xcvr skip list to get list of ports to be skipped based on hwsku.json How did you verify/test it? 
test on pizza box with fix copper ports and sfp ports made sure the test only runs for sfp ports --- tests/platform_tests/sfp/test_sfpshow.py | 14 +- tests/platform_tests/sfp/test_sfputil.py | 142 ++++++++++-------- .../platform_tests/sfp/test_show_intf_xcvr.py | 14 +- 3 files changed, 93 insertions(+), 77 deletions(-) diff --git a/tests/platform_tests/sfp/test_sfpshow.py b/tests/platform_tests/sfp/test_sfpshow.py index dc9369095b3..72fd738084f 100644 --- a/tests/platform_tests/sfp/test_sfpshow.py +++ b/tests/platform_tests/sfp/test_sfpshow.py @@ -22,7 +22,7 @@ ] -def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -35,11 +35,12 @@ def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" -def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -52,5 +53,6 @@ def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostnam 
sfp_eeprom = duthost.command(cmd_sfp_eeprom) parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"]) for intf in dev_conn: - assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" - assert parsed_eeprom[intf] == "SFP EEPROM detected" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" + assert parsed_eeprom[intf] == "SFP EEPROM detected" diff --git a/tests/platform_tests/sfp/test_sfputil.py b/tests/platform_tests/sfp/test_sfputil.py index cb2a6b69f42..f50cb1bd1c9 100644 --- a/tests/platform_tests/sfp/test_sfputil.py +++ b/tests/platform_tests/sfp/test_sfputil.py @@ -27,8 +27,7 @@ pytest.mark.topology('any') ] - -def test_check_sfputil_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfputil_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -41,8 +40,9 @@ def test_check_sfputil_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostn sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" @pytest.mark.parametrize("cmd_sfp_error_status", ["sudo sfputil show error-status", "sudo sfputil show error-status --fetch-from-hardware"]) def test_check_sfputil_error_status(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, 
cmd_sfp_error_status): @@ -63,11 +63,12 @@ def test_check_sfputil_error_status(duthosts, enum_rand_one_per_hwsku_frontend_h pytest.skip("Skip test as error status isn't supported") parsed_presence = parse_output(sfp_error_status["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "OK", "Interface error status is not 'OK'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "OK", "Interface error status is not 'OK'" -def test_check_sfputil_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfputil_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -80,11 +81,12 @@ def test_check_sfputil_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostnam sfp_eeprom = duthost.command(cmd_sfp_eeprom) parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"]) for intf in dev_conn: - assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" - assert parsed_eeprom[intf] == "SFP EEPROM detected" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" + assert parsed_eeprom[intf] == "SFP EEPROM detected" -def test_check_sfputil_reset(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo): +def test_check_sfputil_reset(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -94,15 +96,17 @@ def test_check_sfputil_reset(duthosts, 
enum_rand_one_per_hwsku_frontend_hostname portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index) tested_physical_ports = set() for intf in dev_conn: - phy_intf = portmap[intf][0] - if phy_intf in tested_physical_ports: - logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) - continue - tested_physical_ports.add(phy_intf) - logging.info("resetting {} physical interface {}".format(intf, phy_intf)) - reset_result = duthost.command("{} {}".format(cmd_sfp_reset, intf)) - assert reset_result["rc"] == 0, "'{} {}' failed".format(cmd_sfp_reset, intf) - time.sleep(5) + if intf not in xcvr_skip_list[duthost.hostname]: + phy_intf = portmap[intf][0] + if phy_intf in tested_physical_ports: + logging.info( + "skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) + continue + tested_physical_ports.add(phy_intf) + logging.info("resetting {} physical interface {}".format(intf, phy_intf)) + reset_result = duthost.command("{} {}".format(cmd_sfp_reset, intf)) + assert reset_result["rc"] == 0, "'{} {}' failed".format(cmd_sfp_reset, intf) + time.sleep(5) logging.info("Wait some time for SFP to fully recover after reset") time.sleep(60) @@ -110,8 +114,9 @@ def test_check_sfputil_reset(duthosts, enum_rand_one_per_hwsku_frontend_hostname sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check interface status") mg_facts = 
duthost.get_extended_minigraph_facts(tbinfo) @@ -120,7 +125,7 @@ def test_check_sfputil_reset(duthosts, enum_rand_one_per_hwsku_frontend_hostname "Some interfaces are down: {}".format(intf_facts["ansible_interface_link_down_ports"]) -def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo): +def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo, xcvr_skip_list): """ @summary: Check SFP low power mode @@ -136,42 +141,44 @@ def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index) global ans_host ans_host = duthost - logging.info("Check output of '{}'".format(cmd_sfp_show_lpmode)) lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:]) original_lpmode = copy.deepcopy(parsed_lpmode) for intf in dev_conn: - assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) - assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) + assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" logging.info("Try to change SFP lpmode") tested_physical_ports = set() not_supporting_lpm_physical_ports = set() for intf in dev_conn: - phy_intf = portmap[intf][0] - if phy_intf in tested_physical_ports: - logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) - continue - - sfp_type_cmd = 'redis-cli -n 6 hget "TRANSCEIVER_INFO|{}" type'.format(intf) - sfp_type_docker_cmd = asichost.get_docker_cmd(sfp_type_cmd, "database") 
- sfp_type = duthost.command(sfp_type_docker_cmd)["stdout"] - - power_class_cmd = 'redis-cli -n 6 hget "TRANSCEIVER_INFO|{}" ext_identifier'.format(intf) - power_class_docker_cmd = asichost.get_docker_cmd(power_class_cmd, "database") - power_class = duthost.command(power_class_docker_cmd)["stdout"] - - if not "QSFP" in sfp_type or "Power Class 1" in power_class: - logging.info("skip testing port {} which doesn't support LPM".format(intf)) - not_supporting_lpm_physical_ports.add(phy_intf) - continue - tested_physical_ports.add(phy_intf) - logging.info("setting {} physical interface {}".format(intf, phy_intf)) - new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on" - lpmode_set_result = duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf)) - assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf) + if intf not in xcvr_skip_list[duthost.hostname]: + phy_intf = portmap[intf][0] + if phy_intf in tested_physical_ports: + logging.info( + "skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) + continue + + sfp_type_cmd = 'redis-cli -n 6 hget "TRANSCEIVER_INFO|{}" type'.format(intf) + sfp_type_docker_cmd = asichost.get_docker_cmd(sfp_type_cmd, "database") + sfp_type = duthost.command(sfp_type_docker_cmd)["stdout"] + + power_class_cmd = 'redis-cli -n 6 hget "TRANSCEIVER_INFO|{}" ext_identifier'.format(intf) + power_class_docker_cmd = asichost.get_docker_cmd(power_class_cmd, "database") + power_class = duthost.command(power_class_docker_cmd)["stdout"] + + if not "QSFP" in sfp_type or "Power Class 1" in power_class: + logging.info("skip testing port {} which doesn't support LPM".format(intf)) + not_supporting_lpm_physical_ports.add(phy_intf) + continue + tested_physical_ports.add(phy_intf) + logging.info("setting {} physical interface {}".format(intf, phy_intf)) + new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on" + lpmode_set_result = 
duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf)) + assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf) time.sleep(10) if len(tested_physical_ports) == 0: @@ -181,39 +188,44 @@ def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) - assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) + assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" logging.info("Try to change SFP lpmode") tested_physical_ports = set() for intf in dev_conn: - phy_intf = portmap[intf][0] - if phy_intf in not_supporting_lpm_physical_ports: - logging.info("skip testing port {} which doesn't support LPM".format(intf)) - continue - if phy_intf in tested_physical_ports: - logging.info("skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) - continue - tested_physical_ports.add(phy_intf) - logging.info("restoring {} physical interface {}".format(intf, phy_intf)) - new_lpmode = original_lpmode[intf].lower() - lpmode_set_result = duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf)) - assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf) + if intf not in xcvr_skip_list[duthost.hostname]: + phy_intf = portmap[intf][0] + if phy_intf in not_supporting_lpm_physical_ports: + logging.info("skip testing port {} which doesn't support LPM".format(intf)) + continue + if phy_intf in tested_physical_ports: + logging.info( + 
"skip tested SFPs {} to avoid repeating operating physical interface {}".format(intf, phy_intf)) + continue + tested_physical_ports.add(phy_intf) + logging.info("restoring {} physical interface {}".format(intf, phy_intf)) + new_lpmode = original_lpmode[intf].lower() + lpmode_set_result = duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf)) + assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf) time.sleep(10) logging.info("Check SFP lower power mode again after changing SFP lpmode") lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) - assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode) + assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" logging.info("Check sfp presence again after setting lpmode") sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check interface status") namespace = duthost.get_namespace_from_asic_id(enum_frontend_asic_index) diff --git a/tests/platform_tests/sfp/test_show_intf_xcvr.py b/tests/platform_tests/sfp/test_show_intf_xcvr.py index 
40d3b0d459f..fcfd26bd5b3 100644 --- a/tests/platform_tests/sfp/test_show_intf_xcvr.py +++ b/tests/platform_tests/sfp/test_show_intf_xcvr.py @@ -21,7 +21,7 @@ ] -def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -34,11 +34,12 @@ def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: - assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) - assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) + assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" -def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts): +def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ @@ -51,5 +52,6 @@ def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostnam sfp_eeprom = duthost.command(cmd_sfp_eeprom) parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"]) for intf in dev_conn: - assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" - assert parsed_eeprom[intf] == "SFP EEPROM detected" + if intf not in xcvr_skip_list[duthost.hostname]: + assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" + assert 
parsed_eeprom[intf] == "SFP EEPROM detected" From d57f778e850bf9c74e6cae372f5f3197f391cc9b Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Tue, 10 Aug 2021 16:28:30 -0700 Subject: [PATCH 079/117] [multi-asic] get the correct service name for multi asic (#3993) Fix for test test_restart_swss on multi asic platforms by getting the correct service name for multi asic platform Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- tests/platform_tests/test_sequential_restart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/platform_tests/test_sequential_restart.py b/tests/platform_tests/test_sequential_restart.py index aae49c089f2..d8f03889c07 100644 --- a/tests/platform_tests/test_sequential_restart.py +++ b/tests/platform_tests/test_sequential_restart.py @@ -53,7 +53,7 @@ def restart_service_and_check(localhost, dut, enum_frontend_asic_index, service, logging.info("Restart the %s service on asic %s" %(service, enum_frontend_asic_index)) asichost = dut.asic_instance(enum_frontend_asic_index) - service_name = asichost.get_docker_name(service) + service_name = asichost.get_service_name(service) dut.command("sudo systemctl restart {}".format(service_name)) for container in dut.get_default_critical_services_list(): From 20b918772f0c292ad0d67972071962357926c145 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Wed, 11 Aug 2021 14:09:54 +0800 Subject: [PATCH 080/117] [test_bgp_update_timer] Set correct neighbor type (#3990) Approach What is the motivation for this PR? Set correct neighbor type on storage backend topologies since the DUT type is either BackEndToRRouter or BackEndLeafRouter. Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you do it? How did you verify/test it? 
--- tests/bgp/test_bgp_update_timer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/bgp/test_bgp_update_timer.py b/tests/bgp/test_bgp_update_timer.py index 10a7dbe90e4..7f4df62af66 100644 --- a/tests/bgp/test_bgp_update_timer.py +++ b/tests/bgp/test_bgp_update_timer.py @@ -72,7 +72,7 @@ def common_setup_teardown(duthosts, rand_one_dut_hostname, is_dualtor, is_quagga if k == duthost.hostname: dut_type = v['type'] - if dut_type == 'ToRRouter': + if 'ToRRouter' in dut_type: neigh_type = 'LeafRouter' else: neigh_type = 'ToRRouter' From d751f314f1933a84bda0245db0b0b9770027bfd0 Mon Sep 17 00:00:00 2001 From: yozhao101 <56170650+yozhao101@users.noreply.github.com> Date: Tue, 10 Aug 2021 23:33:15 -0700 Subject: [PATCH 081/117] [memory_checker] Skip testing on Celestica E1031 platform. (#4005) What is the motivation for this PR? This PR aims to skip testing the memory checker against streaming telemetry container on platform Celestica E1031. Since the DuT of Celestica E1031 only has 2GB memory and around 230MB free memory, streaming telemetry container will consumes more than 400MB memory in order to test the feature of memory checker and this testing will cause the DuT to be in bad state. How did you do it? I used the pytest_require(...) to skip testing against the Celestica E1031. How did you verify/test it? I tested this change on str-e1031-acs-3. Any platform specific information? Supported testbed topology if it's a new test case? 
--- tests/memory_checker/test_memory_checker.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/memory_checker/test_memory_checker.py b/tests/memory_checker/test_memory_checker.py index 3f542af9e5c..96e154eaa7a 100644 --- a/tests/memory_checker/test_memory_checker.py +++ b/tests/memory_checker/test_memory_checker.py @@ -213,9 +213,10 @@ def test_memory_checker(duthosts, creds, enum_rand_one_per_hwsku_frontend_hostna container_name = "telemetry" vm_workers = 4 - pytest_require(("20191130" in duthost.os_version and parse_version(duthost.os_version) > parse_version("20191130.72")) - or parse_version(duthost.kernel_version) > parse_version("4.9.0"), - "Test is not supported for 20191130.72 and older image versions!") + pytest_require("Celestica-E1031" not in duthost.facts["hwsku"] + and (("20191130" in duthost.os_version and parse_version(duthost.os_version) > parse_version("20191130.72")) + or parse_version(duthost.kernel_version) > parse_version("4.9.0")), + "Test is not supported for platform Celestica E1031, 20191130.72 and older image versions!") expected_alerting_messages = [] From cb9a42b160df189e5d493ca408856d7aa1aa3b9a Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Wed, 11 Aug 2021 19:53:52 +0800 Subject: [PATCH 082/117] Update sensor test case for Mellanox SN4410 A1 system (#3988) - How did you do it? Add sensor conf for the SN4410 A1 system; Guide the test case to fetch the correct sensor conf while running the test on the SN4410 A1 system. - How did you verify/test it? Run test_sensor on the SN4410 A1 system - Any platform specific information? 
Only take effect on the Mellanox SN4410 A1 system --- ansible/group_vars/sonic/sku-sensors-data.yml | 225 ++++++++++++++++++ tests/platform_tests/test_sensors.py | 2 +- 2 files changed, 226 insertions(+), 1 deletion(-) diff --git a/ansible/group_vars/sonic/sku-sensors-data.yml b/ansible/group_vars/sonic/sku-sensors-data.yml index 8f254c7a474..d8af4b2d33b 100644 --- a/ansible/group_vars/sonic/sku-sensors-data.yml +++ b/ansible/group_vars/sonic/sku-sensors-data.yml @@ -3970,6 +3970,231 @@ sensors_checks: psu_skips: {} sensor_skip_per_version: {} + x86_64-mlnx_msn4410-r0-a1: + alarms: + fan: + - dps460-i2c-4-58/PSU-1(L) Fan 1/fan1_alarm + - dps460-i2c-4-58/PSU-1(L) Fan 1/fan1_fault + + - dps460-i2c-4-59/PSU-2(R) Fan 1/fan1_alarm + - dps460-i2c-4-59/PSU-2(R) Fan 1/fan1_fault + + - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 1/fan1_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 2/fan2_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 1/fan3_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 2/fan4_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 1/fan5_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 2/fan6_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 1/fan7_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 2/fan8_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-5 Tach 1/fan9_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-5 Tach 2/fan10_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 1/fan11_fault + - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 2/fan12_fault + power: + - mp2975-i2c-5-62/PMIC-1 PSU 12V Rail Curr (in1)/curr1_alarm + - mp2975-i2c-5-62/PMIC-1 ASIC 0.8V VCORE MAIN Rail Curr (out)/curr2_alarm + - mp2975-i2c-5-62/PMIC-1 PSU 12V Rail (in1)/in1_crit_alarm + - mp2975-i2c-5-62/PMIC-1 ASIC 0.8V VCORE MAIN Rail (out)/in2_lcrit_alarm + - mp2975-i2c-5-62/PMIC-1 ASIC 0.8V VCORE MAIN Rail (out)/in2_crit_alarm + - mp2975-i2c-5-62/PMIC-1 PSU 12V Rail Pwr (in1)/power1_alarm + + - mp2975-i2c-5-64/PMIC-2 PSU 
12V Rail Curr (in1)/curr1_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.8V VCORE MAIN Rail Curr (out)/curr2_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.2V VCORE MAIN Rail Curr (out)/curr3_alarm + - mp2975-i2c-5-64/PMIC-2 PSU 12V Rail (in1)/in1_crit_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.8V VCORE MAIN Rail (out)/in2_lcrit_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.8V VCORE MAIN Rail (out)/in2_crit_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.2V VCORE MAIN Rail (out)/in3_crit_alarm + - mp2975-i2c-5-64/PMIC-2 ASIC 1.2V VCORE MAIN Rail (out)/in3_lcrit_alarm + - mp2975-i2c-5-64/PMIC-2 PSU 12V Rail Pwr (in1)/power1_alarm + + - mp2975-i2c-5-66/PMIC-3 PSU 12V Rail Curr (in1)/curr1_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 0.85V T0_1 Rail Curr (out)/curr2_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail Curr (out)/curr3_alarm + - mp2975-i2c-5-66/PMIC-3 PSU 12V Rail (in1)/in1_crit_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 0.85V T0_1 Rail (out)/in2_lcrit_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 0.85V T0_1 Rail (out)/in2_crit_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail (out)/in3_crit_alarm + - mp2975-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail (out)/in3_lcrit_alarm + - mp2975-i2c-5-66/PMIC-3 PSU 12V Rail Pwr (in1)/power1_alarm + + - mp2975-i2c-5-6a/PMIC-4 PSU 12V Rail Curr (in1)/curr1_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 0.85V T2_3 Rail Curr (out)/curr2_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 1.8V T2_3 Rail Curr (out)/curr3_alarm + - mp2975-i2c-5-6a/PMIC-4 PSU 12V Rail (in1)/in1_crit_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 0.85V T2_3 Rail (out)/in2_lcrit_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 0.85V T2_3 Rail (out)/in2_crit_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 1.8V T2_3 Rail (out)/in3_crit_alarm + - mp2975-i2c-5-6a/PMIC-4 ASIC 1.8V T2_3 Rail (out)/in3_lcrit_alarm + - mp2975-i2c-5-6a/PMIC-4 PSU 12V Rail Pwr (in1)/power1_alarm + + - mp2975-i2c-5-6e/PMIC-5 PSU 12V Rail Curr (in1)/curr1_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T0_3 Rail_1 Curr (out)/curr2_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T4_7 
Rail_2 Curr (out)/curr3_alarm + - mp2975-i2c-5-6e/PMIC-5 PSU 12V Rail (in1)/in1_crit_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T0_3 Rail_1 (out)/in2_lcrit_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T0_3 Rail_1 (out)/in2_crit_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T4_7 Rail_2 (out)/in3_crit_alarm + - mp2975-i2c-5-6e/PMIC-5 ASIC 1.2V T4_7 Rail_2 (out)/in3_lcrit_alarm + - mp2975-i2c-5-6e/PMIC-5 PSU 12V Rail Pwr (in1)/power1_alarm + + - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_max_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_max_alarm + - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_crit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_lcrit_alarm + + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_max_alarm + - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_lcrit_alarm + + - dps460-i2c-4-58/PSU-1(L) 220V Rail Curr (in)/curr1_crit_alarm + - dps460-i2c-4-58/PSU-1(L) 220V Rail Curr (in)/curr1_max_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail Curr (out)/curr2_crit_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail Curr (out)/curr2_max_alarm + - dps460-i2c-4-58/PSU-1(L) 220V Rail Pwr (in)/power1_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail Pwr (out)/power2_crit_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail Pwr (out)/power2_max_alarm + - 
dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_crit_alarm + - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_lcrit_alarm + - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_max_alarm + - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_min_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_crit_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_lcrit_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_max_alarm + - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_min_alarm + + - dps460-i2c-4-59/PSU-2(R) 220V Rail Curr (in)/curr1_crit_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail Curr (in)/curr1_max_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail Curr (out)/curr2_crit_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail Curr (out)/curr2_max_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail Pwr (in)/power1_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail Pwr (out)/power2_crit_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail Pwr (out)/power2_max_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_crit_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_lcrit_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_max_alarm + - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_min_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_crit_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_lcrit_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_max_alarm + - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_min_alarm + temp: + - coretemp-isa-0000/\P[a-z]*\ id 0/temp1_crit_alarm + - coretemp-isa-0000/Core 0/temp2_crit_alarm + - coretemp-isa-0000/Core 1/temp3_crit_alarm + - coretemp-isa-0000/Core 2/temp4_crit_alarm + - coretemp-isa-0000/Core 3/temp5_crit_alarm + + - mlxsw-i2c-2-48/front panel 001/temp2_fault + - mlxsw-i2c-2-48/front panel 002/temp3_fault + - mlxsw-i2c-2-48/front panel 003/temp4_fault + - mlxsw-i2c-2-48/front panel 004/temp5_fault + - mlxsw-i2c-2-48/front panel 005/temp6_fault + - mlxsw-i2c-2-48/front panel 006/temp7_fault + - mlxsw-i2c-2-48/front panel 007/temp8_fault + - mlxsw-i2c-2-48/front panel 
008/temp9_fault + - mlxsw-i2c-2-48/front panel 009/temp10_fault + - mlxsw-i2c-2-48/front panel 010/temp11_fault + - mlxsw-i2c-2-48/front panel 011/temp12_fault + - mlxsw-i2c-2-48/front panel 012/temp13_fault + - mlxsw-i2c-2-48/front panel 013/temp14_fault + - mlxsw-i2c-2-48/front panel 014/temp15_fault + - mlxsw-i2c-2-48/front panel 015/temp16_fault + - mlxsw-i2c-2-48/front panel 016/temp17_fault + - mlxsw-i2c-2-48/front panel 017/temp18_fault + - mlxsw-i2c-2-48/front panel 018/temp19_fault + - mlxsw-i2c-2-48/front panel 019/temp20_fault + - mlxsw-i2c-2-48/front panel 020/temp21_fault + - mlxsw-i2c-2-48/front panel 021/temp22_fault + - mlxsw-i2c-2-48/front panel 022/temp23_fault + - mlxsw-i2c-2-48/front panel 023/temp24_fault + - mlxsw-i2c-2-48/front panel 024/temp25_fault + - mlxsw-i2c-2-48/front panel 025/temp26_fault + - mlxsw-i2c-2-48/front panel 026/temp27_fault + - mlxsw-i2c-2-48/front panel 027/temp28_fault + - mlxsw-i2c-2-48/front panel 028/temp29_fault + - mlxsw-i2c-2-48/front panel 029/temp30_fault + - mlxsw-i2c-2-48/front panel 030/temp31_fault + - mlxsw-i2c-2-48/front panel 031/temp32_fault + - mlxsw-i2c-2-48/front panel 032/temp33_fault + + - mp2975-i2c-5-62/PMIC-1 Temp 1/temp1_crit_alarm + - mp2975-i2c-5-62/PMIC-1 Temp 1/temp1_max_alarm + + - mp2975-i2c-5-64/PMIC-2 Temp 1/temp1_crit_alarm + - mp2975-i2c-5-64/PMIC-2 Temp 1/temp1_max_alarm + + - mp2975-i2c-5-66/PMIC-3 Temp 1/temp1_crit_alarm + - mp2975-i2c-5-66/PMIC-3 Temp 1/temp1_max_alarm + + - mp2975-i2c-5-6a/PMIC-4 Temp 1/temp1_crit_alarm + - mp2975-i2c-5-6a/PMIC-4 Temp 1/temp1_max_alarm + + - mp2975-i2c-5-6e/PMIC-5 Temp 1/temp1_crit_alarm + - mp2975-i2c-5-6e/PMIC-5 Temp 1/temp1_max_alarm + + - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_crit_alarm + - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_max_alarm + - tps53679-i2c-15-58/PMIC-8 Temp 2/temp2_crit_alarm + - tps53679-i2c-15-58/PMIC-8 Temp 2/temp2_max_alarm + + - tps53679-i2c-15-61/PMIC-9 Temp 1/temp1_crit_alarm + - tps53679-i2c-15-61/PMIC-9 Temp 
1/temp1_max_alarm + - tps53679-i2c-15-61/PMIC-9 Temp 2/temp2_crit_alarm + - tps53679-i2c-15-61/PMIC-9 Temp 2/temp2_max_alarm + + - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_crit_alarm + - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_max_alarm + - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_crit_alarm + - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_max_alarm + - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_crit_alarm + - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_max_alarm + + - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_crit_alarm + - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_max_alarm + - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_crit_alarm + - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_max_alarm + - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_crit_alarm + - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_max_alarm + compares: + power: [] + temp: + - - coretemp-isa-0000/\P[a-z]*\ id 0/temp1_input + - coretemp-isa-0000/\P[a-z]*\ id 0/temp1_crit + - - coretemp-isa-0000/Core 0/temp2_input + - coretemp-isa-0000/Core 0/temp2_crit + - - coretemp-isa-0000/Core 1/temp3_input + - coretemp-isa-0000/Core 1/temp3_crit + - - coretemp-isa-0000/Core 2/temp4_input + - coretemp-isa-0000/Core 2/temp4_crit + - - coretemp-isa-0000/Core 3/temp5_input + - coretemp-isa-0000/Core 3/temp5_crit + + - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_input + - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_max + + - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_input + - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_max + + - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_input + - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_max + non_zero: + fan: [] + power: [] + temp: [] + psu_skips: {} + sensor_skip_per_version: {} + x86_64-mlnx_msn4600c-r0: alarms: fan: diff --git a/tests/platform_tests/test_sensors.py b/tests/platform_tests/test_sensors.py index b7a8814421f..eee19bfc3f1 100644 --- a/tests/platform_tests/test_sensors.py +++ b/tests/platform_tests/test_sensors.py @@ -25,7 +25,7 @@ def test_sensors(duthosts, 
rand_one_dut_hostname, creds): logging.info("Sensor checks:\n{}".format(to_json(sensors_checks[platform]))) # Special treatment for Mellanox platforms which have two different A0 and A1 types - if platform in ['x86_64-mlnx_msn4700-r0', 'x86_64-mlnx_msn4600-r0']: + if platform in ['x86_64-mlnx_msn4700-r0', 'x86_64-mlnx_msn4600-r0', 'x86_64-mlnx_msn4410-r0']: # Check the hardware version and choose sensor conf data accordingly output = duthost.command('cat /run/hw-management/system/config1', module_ignore_errors=True) if output["rc"] == 0 and output["stdout"] == '1': From aa084161150dd353d7efa7053ffeadbb817b664e Mon Sep 17 00:00:00 2001 From: Renu Falodiya <73910672+oxygen980@users.noreply.github.com> Date: Wed, 11 Aug 2021 15:17:35 +0000 Subject: [PATCH 083/117] [chassis] Skip setupDutConfig module for modular chassis (#3788) What is the motivation for this PR? Teat case 'arp/test_neighbor_mac_noptf.py' brings all bgp down, In case of Modular chassis testcase fails because VOQ chassis has extra routes so when script brings bgp down and verify for the routes and expects routes should be zero never works. How did you do it? To Take care of this failure, we do not shutdown all BGP neighbors for a modular chassis. The reasons are:- When we bring BGP down using 'sudo config bgp shutdown all' on VOQ chassis linecard, it only brings down the eBGP neighbors, and not the BGP_VOQ_CHASSIS_NEIGHBORs (the iBGP neighbors to other asics in the chassis) The asic has routes that are learnt from other remote asics in the chassis. VoQ architecture adds static routes for inband interfaces and all eBGP peers on the remote asics as well. To get all the routes to be flushed could be very complex. Also, the reason that the BGP shutdown was added was because on some DUT's are overwhelmed with the BGP updates and thus causing this test to intermittently fail. 
However, we don't see such intermittent failure on linecards of a VoQ chassis which has 12K routes in a T2 topology - possibly because CPU is powerful. --- tests/arp/test_neighbor_mac_noptf.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/arp/test_neighbor_mac_noptf.py b/tests/arp/test_neighbor_mac_noptf.py index 320bbb432b7..56ea5ae829a 100644 --- a/tests/arp/test_neighbor_mac_noptf.py +++ b/tests/arp/test_neighbor_mac_noptf.py @@ -69,9 +69,10 @@ def setupDutConfig(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname): None """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - duthost.command("sudo config bgp shutdown all") - if not wait_until(120, 2.0, self._check_no_bgp_routes, duthost): - pytest.fail('BGP Shutdown Timeout: BGP route removal exceeded 120 seconds.') + if not duthost.get_facts().get("modular_chassis"): + duthost.command("sudo config bgp shutdown all") + if not wait_until(120, 2.0, self._check_no_bgp_routes, duthost): + pytest.fail('BGP Shutdown Timeout: BGP route removal exceeded 120 seconds.') yield From 6eb680057c179fd8b9dce39324265b1d762f1606 Mon Sep 17 00:00:00 2001 From: Ye Jianquan <595101714@qq.com> Date: Thu, 12 Aug 2021 11:22:22 +0800 Subject: [PATCH 084/117] FIX: fix ipfwd/test_mtu testcase failure in backend storage topology (#3998) * FIX: fix ipfwd/test_mtu testcase failure in backend storage topology Approach What is the motivation for this PR? Fix the nightly testcase failure. https://github.com/Azure/sonic-mgmt/issues/3879 How did you do it? Update interface/sub-interface fetch logic to compatible with backend storage t1 topology, details in the code. How did you verify/test it? 
Run the testcase and make sure they passed ipfwd/test_mtu.py::test_mtu[str2-7050qx-32s-acs-03-None-1514] PASSED [ 50%] ipfwd/test_mtu.py::test_mtu[str2-7050qx-32s-acs-03-None-9114] PASSED [100%] Co-authored-by: Jianquan Ye --- tests/common/constants.py | 6 ++++++ tests/common/devices/sonic.py | 18 ++++++++++++++++++ tests/ipfwd/conftest.py | 24 ++++++++++++++++++++---- tests/ipfwd/test_mtu.py | 1 + 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/tests/common/constants.py b/tests/common/constants.py index a7be6088851..87c4d3e3a03 100644 --- a/tests/common/constants.py +++ b/tests/common/constants.py @@ -1,3 +1,9 @@ VLAN_SUB_INTERFACE_SEPARATOR = "." # default port mapping mode for storage backend testbeds PTF_PORT_MAPPING_MODE_DEFAULT = "use_sub_interface" +TOPO_KEY = "topo" +NAME_KEY = "name" +# field in mg_facts to flag whether it's a backend topology or not +IS_BACKEND_TOPOLOGY_KEY = "is_backend_topology" +# a topology whos name contains the indicator 'backend' will be considered as a backend topology +BACKEND_TOPOLOGY_IND = "backend" diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 3ca368f9edb..3e55777e6a1 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -18,6 +18,7 @@ from tests.common.cache import cached from tests.common.helpers.constants import DEFAULT_ASIC_ID, DEFAULT_NAMESPACE from tests.common.errors import RunAnsibleModuleFail +from tests.common import constants logger = logging.getLogger(__name__) @@ -1189,7 +1190,24 @@ def get_extended_minigraph_facts(self, tbinfo, namespace = DEFAULT_NAMESPACE): except (ValueError, KeyError): pass + # set 'backend' flag for mg_facts + # a 'backend' topology may has different name convention for some parameter + self.update_backend_flag(tbinfo, mg_facts) + return mg_facts + + def update_backend_flag(self, tbinfo, mg_facts): + mg_facts[constants.IS_BACKEND_TOPOLOGY_KEY] = self.assert_topo_is_backend(tbinfo) + + # assert whether a topo is 
'backend' type + def assert_topo_is_backend(self, tbinfo): + topo_key = constants.TOPO_KEY + name_key = constants.NAME_KEY + if topo_key in tbinfo.keys() and name_key in tbinfo[topo_key].keys(): + topo_name = tbinfo[topo_key][name_key] + if constants.BACKEND_TOPOLOGY_IND in topo_name: + return True + return False def run_redis_cli_cmd(self, redis_cmd): cmd = "/usr/bin/redis-cli {}".format(redis_cmd) diff --git a/tests/ipfwd/conftest.py b/tests/ipfwd/conftest.py index b5238c3dc57..773819d21d5 100644 --- a/tests/ipfwd/conftest.py +++ b/tests/ipfwd/conftest.py @@ -3,6 +3,7 @@ import logging import json import time +from tests.common import constants logger = logging.getLogger(__name__) @@ -45,15 +46,30 @@ def get_lag_facts(dut, lag_facts, switch_arptable, mg_facts, ignore_lags, enum_f def get_port_facts(dut, mg_facts, port_status, switch_arptable, ignore_intfs, key='src'): - if not mg_facts['minigraph_interfaces']: - pytest.fail("minigraph_interfaces is not defined.") + interfaces = None + is_backend_topology = constants.IS_BACKEND_TOPOLOGY_KEY in mg_facts.keys() and mg_facts[constants.IS_BACKEND_TOPOLOGY_KEY] + if is_backend_topology: + interfaces = mg_facts['minigraph_vlan_sub_interfaces'] + else: + interfaces = mg_facts['minigraph_interfaces'] + + if not interfaces: + pytest.fail("interfaces is not defined.") + selected_port_facts = {} up_port = None for a_intf_name, a_intf_data in port_status['int_status'].items(): if a_intf_data['oper_state'] == 'up' and a_intf_name not in ignore_intfs: # Got a port that is up and not already used. - for intf in mg_facts['minigraph_interfaces']: - if intf['attachto'] == a_intf_name: + for intf in interfaces: + attachto_match = False + if is_backend_topology: + # e.g. 
a_inft_name: 'Ethernet8' attachto:'Ethernet8.10' + attachto_match = (a_intf_name + constants.VLAN_SUB_INTERFACE_SEPARATOR) in intf['attachto'] + else: + attachto_match = intf['attachto'] == a_intf_name + + if attachto_match: up_port = a_intf_name selected_port_facts[key + '_port_ids'] = [mg_facts['minigraph_ptf_indices'][a_intf_name]] selected_port_facts[key + '_router_mac'] = dut.facts['router_mac'] diff --git a/tests/ipfwd/test_mtu.py b/tests/ipfwd/test_mtu.py index 487b0746d87..7c2d8eef631 100644 --- a/tests/ipfwd/test_mtu.py +++ b/tests/ipfwd/test_mtu.py @@ -3,6 +3,7 @@ import logging from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode from tests.ptf_runner import ptf_runner from datetime import datetime From fb36c670de717a543f3577fb86cc8799714398af Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 12 Aug 2021 18:48:17 +0800 Subject: [PATCH 085/117] Log mux cable status if ```test_fdb``` failed on dualtor testbed (#4014) * Log mux cable status if test_fdb failed Signed-off-by: bingwang --- tests/fdb/test_fdb.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/fdb/test_fdb.py b/tests/fdb/test_fdb.py index 92203a1bf73..d7c5726c863 100644 --- a/tests/fdb/test_fdb.py +++ b/tests/fdb/test_fdb.py @@ -182,9 +182,20 @@ def fdb_cleanup(duthosts, rand_one_dut_hostname): pytest_assert(wait_until(20, 2, fdb_table_has_no_dynamic_macs, duthost), "FDB Table Cleanup failed") +@pytest.fixture +def record_mux_status(request, rand_selected_dut, tbinfo): + """ + A function level fixture to record mux cable status if test failed. + """ + yield + if request.node.rep_call.failed and 'dualtor' in tbinfo['topo']['name']: + mux_status = rand_selected_dut.shell("show muxcable status", module_ignore_errors=True)['stdout'] + logger.warning("fdb test failed. 
Mux status are \n {}".format(mux_status)) + + @pytest.mark.bsl @pytest.mark.parametrize("pkt_type", PKT_TYPES) -def test_fdb(ansible_adhoc, ptfadapter, duthosts, rand_one_dut_hostname, ptfhost, pkt_type, toggle_all_simulator_ports_to_rand_selected_tor): +def test_fdb(ansible_adhoc, ptfadapter, duthosts, rand_one_dut_hostname, ptfhost, pkt_type, toggle_all_simulator_ports_to_rand_selected_tor, record_mux_status): # Perform FDB clean up before each test and at the end of the final test fdb_cleanup(duthosts, rand_one_dut_hostname) From 19ddcf6a4c96ae31fd68075e6bd8d22348660eb7 Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Thu, 12 Aug 2021 17:50:55 -0700 Subject: [PATCH 086/117] [TH3] Skip unsupported test cases (#4006) Co-authored-by: Ubuntu --- ansible/group_vars/sonic/variables | 2 +- tests/drop_packets/test_drop_counters.py | 10 +++++++++- tests/everflow/test_everflow_testbed.py | 9 +++++++++ tests/iface_namingmode/test_iface_namingmode.py | 3 +++ 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 3a708b358d1..a504af68633 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -10,7 +10,7 @@ broadcom_td2_hwskus: ['Force10-S6000', 'Force10-S6000-Q24S32', 'Arista-7050-QX32 broadcom_td3_hwskus: ['Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8'] broadcom_th_hwskus: ['Force10-S6100', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32S-C32-T1', 'Arista-7060CX-32S-D48C8', 'Celestica-DX010-C32', "Seastone-DX010" ] broadcom_th2_hwskus: ['Arista-7260CX3-D108C8', 'Arista-7260CX3-C64', 'Arista-7260CX3-Q64'] -broadcom_th3_hwskus: ['DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-M-O32'] +broadcom_th3_hwskus: ['DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-O32'] mellanox_spc1_hwskus: [ 'ACS-MSN2700', 'ACS-MSN2740', 'ACS-MSN2100', 'ACS-MSN2410', 'ACS-MSN2010', 'Mellanox-SN2700', 'Mellanox-SN2700-D48C8' ] 
mellanox_spc2_hwskus: [ 'ACS-MSN3700', 'ACS-MSN3700C', 'ACS-MSN3800', 'Mellanox-SN3800-D112C8' , 'ACS-MSN3420'] diff --git a/tests/drop_packets/test_drop_counters.py b/tests/drop_packets/test_drop_counters.py index e1a15c8a2fb..f6cecd10d54 100755 --- a/tests/drop_packets/test_drop_counters.py +++ b/tests/drop_packets/test_drop_counters.py @@ -96,6 +96,10 @@ def parse_combined_counters(duthosts, rand_one_dut_hostname): def acl_setup(duthosts, loganalyzer): """ Create acl rule defined in config file. Delete rule after test case finished """ for duthost in duthosts: + acl_facts = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"] + if 'DATAACL' not in acl_facts.keys(): + pytest.skip("Skipping test since DATAACL table is not supported on this platform") + base_dir = os.path.dirname(os.path.realpath(__file__)) template_dir = os.path.join(base_dir, 'acl_templates') acl_rules_template = "acltb_test_rule.json" @@ -305,7 +309,11 @@ def test_acl_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, t @summary: Verify that DUT drops packet with SRC IP 20.0.0.0/24 matched by ingress ACL and ACL drop counter incremented """ duthost = duthosts[rand_one_dut_hostname] - if tx_dut_ports[ports_info["dut_iface"]] not in duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["ports"]: + acl_facts = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"] + if 'DATAACL' not in acl_facts.keys(): + pytest.skip("Skipping test since DATAACL table is not supported on this platform") + + if tx_dut_ports[ports_info["dut_iface"]] not in acl_facts["DATAACL"]["ports"]: pytest.skip("RX DUT port absent in 'DATAACL' table") ip_src = "20.0.0.5" diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index 8cbfee49bff..9f77cc95036 100644 --- a/tests/everflow/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -68,6 +68,7 @@ class EverflowIPv4Tests(BaseEverflowTest): DEFAULT_SRC_IP = "20.0.0.1" 
DEFAULT_DST_IP = "30.0.0.1" + MIRROR_POLICER_UNSUPPORTED_ASIC_LIST = ["th3"] @pytest.fixture(params=["tor", "spine"]) def dest_port_type(self, duthosts, rand_one_dut_hostname, setup_info, setup_mirror_session, tbinfo, request): @@ -413,6 +414,14 @@ def test_everflow_dscp_with_policer( # NOTE: This is important to add since for the Policer test case regular packets # and mirror packets can go to same interface, which causes tail drop of # police packets and impacts test case cir/cbs calculation. + + vendor = duthost.facts["asic_type"] + hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] + for asic in self.MIRROR_POLICER_UNSUPPORTED_ASIC_LIST: + vendorAsic = "{0}_{1}_hwskus".format(vendor, asic) + if vendorAsic in hostvars.keys() and duthost.facts['hwsku'] in hostvars[vendorAsic]: + pytest.skip("Skipping test since mirror policing is not supported on {0} {1} platforms".format(vendor,asic)) + default_tarffic_port_type = "tor" if dest_port_type == "spine" else "spine" default_traffic_tx_port = setup_info[default_tarffic_port_type]["dest_port"][0] default_traffic_peer_ip = everflow_utils.get_neighbor_info(duthost, default_traffic_tx_port, tbinfo) diff --git a/tests/iface_namingmode/test_iface_namingmode.py b/tests/iface_namingmode/test_iface_namingmode.py index 85a68725d7d..6a2a2044013 100644 --- a/tests/iface_namingmode/test_iface_namingmode.py +++ b/tests/iface_namingmode/test_iface_namingmode.py @@ -750,6 +750,9 @@ def test_show_acl_table(setup, setup_config_mode, tbinfo): dutHostGuest, mode, ifmode = setup_config_mode minigraph_acls = setup['minigraph_facts']['minigraph_acls'] + if 'DataAcl' not in minigraph_acls: + pytest.skip("Skipping test since DATAACL table is not supported on this platform") + acl_table = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} show acl table DATAACL'.format(ifmode))['stdout'] logger.info('acl_table:\n{}'.format(acl_table)) From 4e2d7320d14646c6bd8a071e392cab7cca208ae5 Mon Sep 17 00:00:00 2001 From: Saikrishna 
Arcot Date: Thu, 12 Aug 2021 19:07:53 -0700 Subject: [PATCH 087/117] Update interface_facts.py for Python 3 Signed-off-by: Saikrishna Arcot --- ansible/library/interface_facts.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/library/interface_facts.py b/ansible/library/interface_facts.py index 654bc6c9b4a..abb81901e11 100644 --- a/ansible/library/interface_facts.py +++ b/ansible/library/interface_facts.py @@ -90,7 +90,7 @@ def get_default_interfaces(ip_path): # v6 routing may result in # RTNETLINK answers: Invalid argument continue - words = out.split('\n')[0].split() + words = out.decode('utf8').split('\n')[0].split() # A valid output starts with the queried address on the first line if len(words) > 0 and words[0] == command[key][-1]: for i in range(len(words) - 1): @@ -188,7 +188,7 @@ def gather_ip_interface_info(): interfaces[device]['promisc'] = promisc_mode def parse_ip_output(output, secondary=False): - for line in output.split('\n'): + for line in output.decode('utf8').split('\n'): if not line: continue words = line.split() @@ -284,7 +284,7 @@ def parse_ip_output(output, secondary=False): parse_ip_output(secondary_data, secondary=True) buffer = {'interfaces':interfaces, 'ips':ips} - print json.dumps(buffer) + print(json.dumps(buffer)) gather_ip_interface_info() """ From 3441ac309ce74bdc705bc31049383ec3bd83e14b Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Thu, 12 Aug 2021 19:10:08 -0700 Subject: [PATCH 088/117] Have tcpdump use Linux cooked v1 instead of v2 when listening on any interface On Bullseye, tcpdump defaults to writing pcap files in Linux cooked v2 format. Scapy, however, doesn't support reading Linux cooked v2. So force tcpdump to use the older format. 
Signed-off-by: Saikrishna Arcot --- tests/bgp/test_bgp_update_timer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/bgp/test_bgp_update_timer.py b/tests/bgp/test_bgp_update_timer.py index 7f4df62af66..b64229e4d06 100644 --- a/tests/bgp/test_bgp_update_timer.py +++ b/tests/bgp/test_bgp_update_timer.py @@ -37,7 +37,13 @@ @contextlib.contextmanager def log_bgp_updates(duthost, iface, save_path): """Capture bgp packets to file.""" - start_pcap = "tcpdump -i %s -w %s port 179" % (iface, save_path) + if iface == "any": + # Scapy doesn't support LINUX_SLL2 (Linux cooked v2), and tcpdump on Bullseye + # defaults to writing in that format when listening on any interface. Therefore, + # have it use LINUX_SLL (Linux cooked) instead. + start_pcap = "tcpdump -y LINUX_SLL -i %s -w %s port 179" % (iface, save_path) + else: + start_pcap = "tcpdump -i %s -w %s port 179" % (iface, save_path) stop_pcap = "pkill -f '%s'" % start_pcap start_pcap = "nohup %s &" % start_pcap duthost.shell(start_pcap) From 0f59bdf05d33dd435b43af968985a08dbf4b6223 Mon Sep 17 00:00:00 2001 From: Anton Ptashnik Date: Fri, 13 Aug 2021 11:30:15 +0300 Subject: [PATCH 089/117] tacacs test: fixed wrong assertion which caused tests to pass even when operation failed (#4017) TACACS tests execute a CLI command on DUT and verify its output is as expected. Command output check is implemented in the way it can be skipped when the command fails and tests pass then. Addressed the issue. - dedicated a function for repeated test assertion fragment Problematic code fragment: ``` res = ssh_remote_run(dut, cmd) for l in res['stdout_lines']: # stdout_lines is empty when cmd fails so assert is never done assert ... 
``` --- tests/tacacs/test_jit_user.py | 18 ++++++------------ tests/tacacs/test_ro_user.py | 11 +++-------- tests/tacacs/test_rw_user.py | 11 +++-------- tests/tacacs/utils.py | 9 +++++++++ 4 files changed, 21 insertions(+), 28 deletions(-) create mode 100644 tests/tacacs/utils.py diff --git a/tests/tacacs/test_jit_user.py b/tests/tacacs/test_jit_user.py index 8efe8f06ec2..876a31c3341 100644 --- a/tests/tacacs/test_jit_user.py +++ b/tests/tacacs/test_jit_user.py @@ -2,6 +2,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.plugins.tacacs import setup_tacacs_server from .test_ro_user import ssh_remote_run +from .utils import check_output pytestmark = [ pytest.mark.disable_loganalyzer, @@ -18,10 +19,8 @@ def test_jit_user(localhost, duthosts, ptfhost, enum_rand_one_per_hwsku_hostname dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_jit_user'], creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'cat /etc/passwd') - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "test": - assert fds[4] == "remote_user" + + check_output(res, 'test', 'remote_user') # change jit user to netadmin creds_all_duts[duthost]['tacacs_jit_user_membership'] = 'netadmin' @@ -29,10 +28,8 @@ def test_jit_user(localhost, duthosts, ptfhost, enum_rand_one_per_hwsku_hostname res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_jit_user'], creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'cat /etc/passwd') - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "testadmin": - assert fds[4] == "remote_user_su" + + check_output(res, 'testadmin', 'remote_user_su') # change jit user back to netuser creds_all_duts[duthost]['tacacs_jit_user_membership'] = 'netuser' @@ -40,7 +37,4 @@ def test_jit_user(localhost, duthosts, ptfhost, enum_rand_one_per_hwsku_hostname res = ssh_remote_run(localhost, dutip, 
creds_all_duts[duthost]['tacacs_jit_user'], creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'cat /etc/passwd') - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "test": - assert fds[4] == "remote_user" + check_output(res, 'test', 'remote_user') \ No newline at end of file diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py index b3dfafcf64f..763e210e580 100644 --- a/tests/tacacs/test_ro_user.py +++ b/tests/tacacs/test_ro_user.py @@ -1,6 +1,7 @@ import pytest import time from tests.common.helpers.assertions import pytest_assert +from .utils import check_output pytestmark = [ pytest.mark.disable_loganalyzer, @@ -77,10 +78,7 @@ def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_al res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], creds_all_duts[duthost]['tacacs_ro_user_passwd'], 'cat /etc/passwd') - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "test": - assert fds[4] == "remote_user" + check_output(res, 'test', 'remote_user') def test_ro_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs_v6): duthost = duthosts[enum_rand_one_per_hwsku_hostname] @@ -88,10 +86,7 @@ def test_ro_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, cre res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], creds_all_duts[duthost]['tacacs_ro_user_passwd'], 'cat /etc/passwd') - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "test": - assert fds[4] == "remote_user" + check_output(res, 'test', 'remote_user') def test_ro_user_allowed_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] diff --git a/tests/tacacs/test_rw_user.py b/tests/tacacs/test_rw_user.py index 58d6f5417dd..654c367d474 100644 --- a/tests/tacacs/test_rw_user.py +++ b/tests/tacacs/test_rw_user.py @@ -2,6 +2,7 @@ import 
crypt from .test_ro_user import ssh_remote_run +from .utils import check_output pytestmark = [ pytest.mark.disable_loganalyzer, @@ -18,10 +19,7 @@ def test_rw_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_al res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_rw_user'], creds_all_duts[duthost]['tacacs_rw_user_passwd'], "cat /etc/passwd") - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "testadmin": - assert fds[4] == "remote_user_su" + check_output(res, 'testadmin', 'remote_user_su') def test_rw_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs_v6): """test tacacs rw user @@ -31,7 +29,4 @@ def test_rw_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, cre res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_rw_user'], creds_all_duts[duthost]['tacacs_rw_user_passwd'], "cat /etc/passwd") - for l in res['stdout_lines']: - fds = l.split(':') - if fds[0] == "testadmin": - assert fds[4] == "remote_user_su" + check_output(res, 'testadmin', 'remote_user_su') diff --git a/tests/tacacs/utils.py b/tests/tacacs/utils.py new file mode 100644 index 00000000000..b8f3ae77f51 --- /dev/null +++ b/tests/tacacs/utils.py @@ -0,0 +1,9 @@ +from tests.common.helpers.assertions import pytest_assert + + +def check_output(output, exp_val1, exp_val2): + pytest_assert(not output['failed'], output['stderr']) + for l in output['stdout_lines']: + fds = l.split(':') + if fds[0] == exp_val1: + pytest_assert(fds[4] == exp_val2) \ No newline at end of file From 69973979439c99ff26b7ded5befb1c4671caad5f Mon Sep 17 00:00:00 2001 From: Anton Ptashnik Date: Fri, 13 Aug 2021 11:58:55 +0300 Subject: [PATCH 090/117] added EOS fanouts support for port autoneg tests (#3823) Summary: added EOS fanouts support for port autoneg tests Fixes # (issue) implemented autoneg related management methods for EOS fanout Port autoneg setup is required on both sides, per cable connection, so 
tests use a fanout as neighbor for DUT to establish autoneg with. Interaction with a fanout rely on per OS specific implementation and some methods required by tests were missing for EOS. Implemented those methods --- tests/common/devices/eos.py | 88 ++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 2 deletions(-) diff --git a/tests/common/devices/eos.py b/tests/common/devices/eos.py index 08275f3b248..97294437cd4 100644 --- a/tests/common/devices/eos.py +++ b/tests/common/devices/eos.py @@ -1,12 +1,16 @@ import ipaddress import json import logging +import re from tests.common.devices.base import AnsibleHostBase logger = logging.getLogger(__name__) - +def _raise_err(msg): + logger.error(msg) + raise Exception(msg) + class EosHost(AnsibleHostBase): """ @summary: Class for Eos switch @@ -183,4 +187,84 @@ def get_route(self, prefix): return self.eos_command(commands=[{ 'command': '{} {}'.format(cmd, prefix), 'output': 'json' - }])['stdout'][0] + }])['stdout'][0] + + def get_auto_negotiation_mode(self, interface_name): + output = self.eos_command(commands=[{ + 'command': 'show interfaces %s status' % interface_name, + 'output': 'json' + }]) + if self._has_cli_cmd_failed(output): + _raise_err('Failed to get auto neg state for {}: {}'.format(interface_name, output['msg'])) + autoneg_enabled = output['stdout'][0]['interfaceStatuses'][interface_name]['autoNegotiateActive'] + return autoneg_enabled + + def _reset_port_speed(self, interface_name): + out = self.eos_config( + lines=['default speed'], + parents=['interface {}'.format(interface_name)]) + logger.debug('Reset port speed for %s: %s' % (interface_name, out)) + return not self._has_cli_cmd_failed(out) + + def set_auto_negotiation_mode(self, interface_name, enabled): + if self.get_auto_negotiation_mode(interface_name) == enabled: + return True + + if enabled: + speed_to_advertise = self.get_supported_speeds(interface_name)[-1] + speed_to_advertise = speed_to_advertise[:-3] + 'gfull' + out = 
self.eos_config( + lines=['speed auto %s' % speed_to_advertise], + parents=['interface {}'.format(interface_name)]) + logger.debug('Set auto neg to {} for port {}: {}'.format(enabled, interface_name, out)) + return not self._has_cli_cmd_failed(out) + return self._reset_port_speed(interface_name) + + + def get_speed(self, interface_name): + output = self.eos_command(commands=['show interfaces %s transceiver properties' % interface_name]) + found_txt = re.search(r'Operational Speed: (\S+)', output['stdout'][0]) + if found_txt is None: + _raise_err('Not able to extract interface %s speed from output: %s' % (interface_name, output['stdout'])) + + v = found_txt.groups()[0] + return v[:-1] + '000' + + def _has_cli_cmd_failed(self, cmd_output_obj): + return 'failed' in cmd_output_obj and cmd_output_obj['failed'] + + def set_speed(self, interface_name, speed): + + if not speed: + # other set_speed implementations advertise port speeds when speed=None + # but in EOS autoneg activation and speeds advertisement is done via a single CLI cmd + # so this branch left nop intentionally + return True + + speed = speed[:-3] + 'gfull' + out = self.host.eos_config( + lines=['speed forced {}'.format(speed)], + parents='interface %s' % interface_name)[self.hostname] + logger.debug('Set force speed for port {} : {}'.format(interface_name, out)) + return not self._has_cli_cmd_failed(out) + + def get_supported_speeds(self, interface_name): + """Get supported speeds for a given interface + + Args: + interface_name (str): Interface name + + Returns: + list: A list of supported speed strings or None + """ + output = self.eos_command(commands=['show interfaces %s capabilities' % interface_name]) + found_txt = re.search("Speed/Duplex: (.+)", output['stdout'][0]) + if found_txt is None: + _raise_err('Failed to find port speeds list in output: %s' % output['stdout']) + + speed_list = found_txt.groups()[0] + speed_list = speed_list.split(',') + speed_list.remove('auto') + def 
extract_speed_only(v): + return re.match('\d+', v).group() + '000' + return list(map(extract_speed_only, speed_list)) From 867f87c13a8ff60000e3d4c1bf42656fdee4e38a Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Fri, 13 Aug 2021 17:25:33 +0800 Subject: [PATCH 091/117] [bug fix][test_container_checker] change config of monit to stablize the test #7 (#4008) Because the Monit sampling interval is too long (60s), and the syncd container restart time is rather short (sometimes it just needs about 30s), and the alert message rule is too strict, so sometimes Monit can not monitoring syncd down for 2 times for 2 mins and there are no syncd alert messages in syslog. By changing the relevant config of Monit, we can stabilize the test. Changing the sampling intervals to 10 in /etc/monit/monitrc ensures that the Monit can monitor syncd container down. Changing the start delay to 10 in /etc/monit/monitrc ensures that the Monit start quicker than syncd start. ``` ## Start Monit in the background (run as a daemon): # set daemon 10 # check services at 1-minute intervals with start delay 10 # we delay Monit to start monitoring for 5 minutes # intentionally such that all containers and processes # have ample time to start up. # ``` Changing the rule of alerting messages in /etc/monit/conf.d/sonic-host makes it is easy to send alert messages. ``` check program container_checker with path "/usr/bin/container_checker" if status != 0 for 1 times within 1 cycles then alert repeat every 1 cycles ``` #### How did you verify/test it? 
run test: `py.test container_checker/test_container_checker.py --inventory "../ansible/inventory, ../ansible/veos" --host-pattern arc-switch1025 --module-path ../ansible/library/ --testbed arc-switch1025-t0 --testbed_file ../ansible/testbed.csv --allow_recover` --- tests/container_checker/test_container_checker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/container_checker/test_container_checker.py b/tests/container_checker/test_container_checker.py index ad8ec308b19..acb6c64bf1f 100755 --- a/tests/container_checker/test_container_checker.py +++ b/tests/container_checker/test_container_checker.py @@ -70,11 +70,12 @@ def update_monit_service(duthost): duthost.shell("sudo cp -f /etc/monit/monitrc /tmp/") duthost.shell("sudo cp -f /etc/monit/conf.d/sonic-host /tmp/") - temp_config_line = " if status != 0 for 2 times within 2 cycles then alert repeat every 1 cycles" + temp_config_line = " if status != 0 for 1 times within 1 cycles then alert repeat every 1 cycles" logger.info("Reduce the monitoring interval of container_checker.") duthost.shell("sudo sed -i '$s/^./#/' /etc/monit/conf.d/sonic-host") duthost.shell("echo '{}' | sudo tee -a /etc/monit/conf.d/sonic-host".format(temp_config_line)) - duthost.shell("sudo sed -i '/with start delay 300/s/^./#/' /etc/monit/monitrc") + duthost.shell("sudo sed -i 's/with start delay 300/with start delay 10/' /etc/monit/monitrc") + duthost.shell("sudo sed -i 's/set daemon 60/set daemon 10/' /etc/monit/monitrc") logger.info("Restart the Monit service without delaying to monitor.") duthost.shell("sudo systemctl restart monit") yield From 75c42a5f10f653ba1e8d1e19a97c04585f3a45c2 Mon Sep 17 00:00:00 2001 From: Oleksandr Kozodoi Date: Fri, 13 Aug 2021 12:43:01 +0300 Subject: [PATCH 092/117] [fanout]: Added support of AOS fanout (#3982) - Added os_name parameter for switch connection module. 
- Added new module for running ansible module on the AOS switch Signed-off-by: Oleksandr Kozodoi --- ansible/plugins/action/apswitch.py | 4 +- ansible/plugins/connection/switch.py | 14 +++-- tests/common/devices/aos.py | 92 ++++++++++++++++++++++++++++ tests/common/devices/fanout.py | 7 ++- 4 files changed, 110 insertions(+), 7 deletions(-) create mode 100644 tests/common/devices/aos.py diff --git a/ansible/plugins/action/apswitch.py b/ansible/plugins/action/apswitch.py index ae39718afb5..4b596e70635 100644 --- a/ansible/plugins/action/apswitch.py +++ b/ansible/plugins/action/apswitch.py @@ -26,6 +26,7 @@ def run(self, tmp=None, task_vars=None): _root = boolean(self._task.args.get('root', 'no')) _reboot = boolean(self._task.args.get('reboot', 'no')) _timeout = self._task.args.get('timeout', None) + _os_name = self._task.args.get('os_name', '') if (type(_login) == unicode): _login = ast.literal_eval(_login) @@ -58,7 +59,8 @@ def run(self, tmp=None, task_vars=None): su=_su, root=_root, reboot=_reboot, - timeout=_timeout) + timeout=_timeout, + os_name=_os_name) return result diff --git a/ansible/plugins/connection/switch.py b/ansible/plugins/connection/switch.py index ba6ee521df4..e1c4ce8c952 100644 --- a/ansible/plugins/connection/switch.py +++ b/ansible/plugins/connection/switch.py @@ -22,7 +22,6 @@ class Connection(ConnectionBase): def __init__(self, *args, **kwargs): super(Connection, self).__init__(*args, **kwargs) - self.host = self._play_context.remote_addr self.connection_retry_interval = 60 @@ -54,6 +53,11 @@ def _build_command(self): self._ssh_command += ['-o', 'GSSAPIAuthentication=no', '-o', 'PubkeyAuthentication=no'] + + if 'aos' in self.os_name: + self._ssh_command += ['-o', 'KexAlgorithms=+diffie-hellman-group1-sha1', + '-o', 'HostKeyAlgorithms=+ssh-dss'] + self._ssh_command += ['-o', 'ConnectTimeout=' + str(self.timeout)] def _remove_unprintable(self, buff): @@ -120,6 +124,8 @@ def _spawn_connect(self): self.sku = 'mlnx_os' elif 'Dell' in 
client.before: self.sku = 'dell' + elif 'aos' in self.os_name: + self.sku = 'aos' else: raise AnsibleError("Unable to determine fanout SKU") break @@ -180,7 +186,7 @@ def _spawn_connect(self): self._display.vvv("Entered bash with root", host=self.host) else: self._display.vvv("Entered bash", host=self.host) - elif self.sku == "eos": + elif self.sku == "eos" or self.sku == "aos": client.sendline('bash') client.expect(['\$ ']) else: @@ -189,7 +195,6 @@ def _spawn_connect(self): return client def exec_command(self, *args, **kwargs): - self.template = kwargs['template'] if kwargs['host'] is not None: self.host = kwargs['host'] @@ -198,6 +203,7 @@ def exec_command(self, *args, **kwargs): self.bash = kwargs['bash'] self.su = kwargs['su'] self.reboot = kwargs['reboot'] + self.os_name = kwargs['os_name'] if kwargs['root']: self.login['user'] = 'root' if kwargs['timeout']: @@ -217,7 +223,7 @@ def exec_command(self, *args, **kwargs): if self.sku == 'nxos': # bash-3.2$ for nexus 6.5 prompts = ['bash-3\.2\$', 'bash-3\.2#'] - elif self.sku == 'eos': + elif self.sku == 'eos' or self.sku == "aos": prompts = ['\$ '] if self.sku in ('mlnx_os',): diff --git a/tests/common/devices/aos.py b/tests/common/devices/aos.py new file mode 100644 index 00000000000..cf93038bd51 --- /dev/null +++ b/tests/common/devices/aos.py @@ -0,0 +1,92 @@ +import json +import logging + + +class AosHost(): + """ + @summary: Class for Accton switch + For running ansible module on the Accton switch + """ + + def __init__(self, ansible_adhoc, hostname, user, passwd, gather_facts=False): + self.hostname = hostname + self.user = user + self.passwd = passwd + self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"] + + def _exec_jinja_template(self, task_name, jinja_template): + inventory = 'lab' + ansible_root = '../ansible/' + playbook_name = 'accton_os_cmd_exec.yml' + jinja_name = 'accton_os_cmd_exec.j2' + playbook_text = '- hosts: 
{}\n'.format(self.hostname) + \ + ' gather_facts: no\n\n' + \ + ' tasks:\n' + \ + ' - conn_graph_facts: host={{ inventory_hostname }}\n' + \ + ' delegate_to: localhost\n' + \ + ' tags: always\n\n' + \ + ' - set_fact:\n' + \ + ' peer_host: \"{{device_info[inventory_hostname][\'mgmtip\']}}\"\n' + \ + ' peer_hwsku: \"{{device_info[inventory_hostname][\'HwSku\']}}\"\n\n' + \ + ' - name: {}\n'.format(task_name) + \ + ' action: apswitch template={}\n'.format(jinja_name) + \ + ' args:\n' + \ + ' host: \"{{peer_host}}\"\n' + \ + ' login: \"{{switch_login[hwsku_map[peer_hwsku]]}}\"\n' + \ + ' os_name: {}\n'.format('aos') + \ + ' connection: switch\n' + + with open(ansible_root + jinja_name, 'w') as f: + f.write(jinja_template) + + with open(ansible_root + playbook_name, 'w') as f: + f.write(playbook_text) + + res = self.exec_template(ansible_root, playbook_name, inventory) + + os.system("rm {}".format(ansible_root + jinja_name)) + os.system("rm {}".format(ansible_root + playbook_name)) + + return res + + def shutdown(self, interface_name): + task_name = 'Shutdown interface {}'.format(interface_name) + template = 'configure\n' + \ + ' interface {}\n'.format(interface_name) + \ + ' shutdown\n' + \ + ' exit\n' + \ + 'exit\n' + \ + 'exit\n' + out = self._exec_jinja_template(task_name, template) + logging.info('Shut interface {}'.format(interface_name)) + return {self.hostname : out } + + def no_shutdown(self, interface_name): + task_name = 'No shutdown interface {}'.format(interface_name) + template = 'configure\n' + \ + ' interface {}\n'.format(interface_name) + \ + ' no shutdown\n' + \ + ' exit\n' + \ + 'exit\n' + \ + 'exit\n' + out = self._exec_jinja_template(task_name, template) + logging.info('No shut interface {}'.format(interface_name)) + return {self.hostname : out } + + def command(self, cmd): + task_name = 'Execute command \'{}\''.format(cmd) + out = self._exec_jinja_template(task_name, cmd) + logging.info('Exec command: \'{}\''.format(cmd)) + return {self.hostname : 
out } + + def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs): + """ + Execute ansible playbook with specified parameters + """ + playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvv' + cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory, + fanout_host=self.hostname, extra_vars=json.dumps(kwargs)) + res = self.localhost.shell(cli_cmd) + + if res["localhost"]["rc"] != 0: + raise Exception("Unable to execute template\n{}".format(res["localhost"]["stdout"])) diff --git a/tests/common/devices/fanout.py b/tests/common/devices/fanout.py index ca03e8e9fd9..4acfe0fecca 100644 --- a/tests/common/devices/fanout.py +++ b/tests/common/devices/fanout.py @@ -1,10 +1,10 @@ - import logging from tests.common.devices.sonic import SonicHost from tests.common.devices.onyx import OnyxHost from tests.common.devices.ixia import IxiaHost from tests.common.devices.eos import EosHost +from tests.common.devices.aos import AosHost logger = logging.getLogger(__name__) @@ -33,6 +33,9 @@ def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd, shell # TODO: add ixia chassis abstraction self.os = os self.host = IxiaHost(ansible_adhoc, os, hostname, device_type) + elif os == 'aos': + self.os = os + self.host = AosHost(ansible_adhoc, hostname, user, passwd) else: # Use eos host if the os type is unknown self.os = 'eos' @@ -138,7 +141,7 @@ def get_auto_negotiation_mode(self, interface_name): interface_name (str): Interface name Returns: - boolean: True if auto negotiation mode is enabled else False. Return None if + boolean: True if auto negotiation mode is enabled else False. Return None if the auto negotiation mode is unknown or unsupported. 
""" return self.host.get_auto_negotiation_mode(interface_name) From 01a83369f61ade171459d2ef908180383028e047 Mon Sep 17 00:00:00 2001 From: AndoniSanguesa <31708881+AndoniSanguesa@users.noreply.github.com> Date: Fri, 13 Aug 2021 02:56:45 -0700 Subject: [PATCH 093/117] [testbed]: delay after config reload for SONiC neighbor KVM setup (#3937) On our team, we were experiencing issues with the leaf nodes' physical interfaces not being configured correctly after the topology deployment on the KVM testbed. Issue seemed to arise from restarting the bgp service before the updated config had been loaded on some nodes. This is likely due to CPU time being low on the VMs we are using, may be helpful to others that run into similar issues. How did you do it? Added one minute delay between the config reload and the restart for the bgp service. Co-authored-by: Andoni Sanguesa --- ansible/roles/sonic/handlers/main.yml | 4 ++++ ansible/roles/sonic/tasks/vsonic.yml | 1 + 2 files changed, 5 insertions(+) diff --git a/ansible/roles/sonic/handlers/main.yml b/ansible/roles/sonic/handlers/main.yml index 7b9451ad6fc..a063302918e 100755 --- a/ansible/roles/sonic/handlers/main.yml +++ b/ansible/roles/sonic/handlers/main.yml @@ -6,6 +6,10 @@ become: yes listen: "Update config db" +- name: wait for SONiC update config db to finish + pause: + seconds: 180 + - name: SONiC restart BGP service become: true service: name=bgp diff --git a/ansible/roles/sonic/tasks/vsonic.yml b/ansible/roles/sonic/tasks/vsonic.yml index 3a9fe4d3b3c..83c1ce443e1 100644 --- a/ansible/roles/sonic/tasks/vsonic.yml +++ b/ansible/roles/sonic/tasks/vsonic.yml @@ -48,6 +48,7 @@ when: hostname in configuration notify: - Update config db + - wait for SONiC update config db to finish - name: create frr config template: src="frr-{{ topo }}-{{ props.swrole }}.j2" From a1f47ab206636df539d1a0390600e11b98066f41 Mon Sep 17 00:00:00 2001 From: roman_savchuk Date: Fri, 13 Aug 2021 13:00:57 +0300 Subject: [PATCH 094/117] [ 
test_autonegotiation ] Added missing topology mark (#3780) Signed-off-by: Roman Savchuk --- tests/platform_tests/test_auto_negotiation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/platform_tests/test_auto_negotiation.py b/tests/platform_tests/test_auto_negotiation.py index 3bb88b4130c..d50784bdde0 100644 --- a/tests/platform_tests/test_auto_negotiation.py +++ b/tests/platform_tests/test_auto_negotiation.py @@ -17,6 +17,10 @@ from tests.common.utilities import wait_until from tests.platform_tests.link_flap.link_flap_utils import build_test_candidates +pytestmark = [ + pytest.mark.topology('any'), +] + logger = logging.getLogger(__name__) STATE_DB = 'STATE_DB' From f601361db8b05e4948789651236727ad2708755a Mon Sep 17 00:00:00 2001 From: Vincent Chiang <47546216+vincentchiang-ec@users.noreply.github.com> Date: Fri, 13 Aug 2021 18:02:05 +0800 Subject: [PATCH 095/117] [test_warm_reboot_sad_bgp] add "OpenSent" as one of the expected neighbor state (#3882) Sometimes test_warm_reboot_sad_bgp is failed because BGP neighbor state is not down in preboot state, it is due to the BGP neighbor state is "OpenSent" not "Active". VM still sends OPEN message after DUT BGP shutdown, and there is a chance to get the BGP state is "OpenSent". 
--- ansible/roles/test/files/ptftests/arista.py | 2 +- ansible/roles/test/files/ptftests/sad_path.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py index 0dc67768491..211625ea7f5 100644 --- a/ansible/roles/test/files/ptftests/arista.py +++ b/ansible/roles/test/files/ptftests/arista.py @@ -453,7 +453,7 @@ def verify_bgp_neigh_state(self, dut=None, state="Active"): if 'vrfs' in obj and 'default' in obj['vrfs']: obj = obj['vrfs']['default'] if 'peers' in obj: - bgp_state[ver] = (obj['peers'][dut[ver]]['peerState'] == state) + bgp_state[ver] = (obj['peers'][dut[ver]]['peerState'] in state) else: self.fails.add('Verify BGP %S neighbor: Peer attribute missing in output' % ver) else: diff --git a/ansible/roles/test/files/ptftests/sad_path.py b/ansible/roles/test/files/ptftests/sad_path.py index 7f91fe0a7ba..73fe07cef70 100644 --- a/ansible/roles/test/files/ptftests/sad_path.py +++ b/ansible/roles/test/files/ptftests/sad_path.py @@ -214,7 +214,7 @@ def populate_bgp_state(self): self.dut_bgps['changed_state'] = 'Active' [self.dut_needed.update({vm:None}) for vm in self.neigh_vms] elif self.oper_type == 'dut_bgp_down': - self.neigh_bgps['changed_state'] = 'Active' + self.neigh_bgps['changed_state'] = 'Active,OpenSent' self.dut_bgps['changed_state'] = 'Idle' elif 'neigh_lag' in self.oper_type: # on the DUT side, bgp states are different pre and post boot. 
hence passing multiple values From 81fc44aacfdcae4893990c68a1309e633f231d4a Mon Sep 17 00:00:00 2001 From: Renu Falodiya <73910672+oxygen980@users.noreply.github.com> Date: Fri, 13 Aug 2021 10:04:06 +0000 Subject: [PATCH 096/117] ipfwd: In 'conftest.py' Fixed format error (#3769) --- tests/ipfwd/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ipfwd/conftest.py b/tests/ipfwd/conftest.py index 773819d21d5..04aaee0e7a5 100644 --- a/tests/ipfwd/conftest.py +++ b/tests/ipfwd/conftest.py @@ -159,7 +159,7 @@ def gather_facts(tbinfo, duthosts, enum_rand_one_per_hwsku_frontend_hostname, en facts.update(dst_port_facts) if src is None or dst is None: - pytest.fail("Did not find 2 lag or interfaces that are up on host {}".duthost.hostname) + pytest.fail("Did not find 2 lag or interfaces that are up on host {}".format(duthost.hostname)) logger.info("gathered_new_facts={}".format(json.dumps(facts, indent=2))) yield facts From 7752818d3f9df09d1f3fc28b7442a9cc0a4c368e Mon Sep 17 00:00:00 2001 From: "Nana@Nvidia" <78413612+nhe-NV@users.noreply.github.com> Date: Fri, 13 Aug 2021 18:04:46 +0800 Subject: [PATCH 097/117] [Mellanox] Add support for a new Mellanox HWSKU Mellanox-SN4600C-D112C8 (#3940) --- ansible/group_vars/sonic/variables | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index a504af68633..79910419995 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -14,7 +14,7 @@ broadcom_th3_hwskus: ['DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-O32'] mellanox_spc1_hwskus: [ 'ACS-MSN2700', 'ACS-MSN2740', 'ACS-MSN2100', 'ACS-MSN2410', 'ACS-MSN2010', 'Mellanox-SN2700', 'Mellanox-SN2700-D48C8' ] mellanox_spc2_hwskus: [ 'ACS-MSN3700', 'ACS-MSN3700C', 'ACS-MSN3800', 'Mellanox-SN3800-D112C8' , 'ACS-MSN3420'] -mellanox_spc3_hwskus: [ 'ACS-MSN4700', 'ACS-MSN4600C', 'ACS-MSN4410' ] +mellanox_spc3_hwskus: [ 'ACS-MSN4700', 
'ACS-MSN4600C', 'ACS-MSN4410', 'Mellanox-SN4600C-D112C8'] mellanox_hwskus: "{{ mellanox_spc1_hwskus + mellanox_spc2_hwskus + mellanox_spc3_hwskus }}" cavium_hwskus: [ "AS7512", "XP-SIM" ] From 794f32f03fa3b61c9a975d4f254dc26cf627835b Mon Sep 17 00:00:00 2001 From: stephengao-ragilenetworks <87261920+stephengao-ragilenetworks@users.noreply.github.com> Date: Fri, 13 Aug 2021 18:08:33 +0800 Subject: [PATCH 098/117] [Ragile]: Add new platform RA-B6510-48V8C (#3765) --- ansible/module_utils/port_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index 2a230d5c9e5..5ec3fa32d7c 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -204,7 +204,7 @@ def get_port_alias_to_name_map(hwsku, asic_id=None): else: for i in range(1,9): port_alias_to_name_map["Ethernet1/%d" % i] = "Ethernet%d" % ((i - 1) * 4) - elif hwsku == "B6510-48VS8CQ": + elif hwsku == "B6510-48VS8CQ" or "RA-B6510-48V8C": for i in range(1,49): port_alias_to_name_map["twentyfiveGigE0/%d" % i] = "Ethernet%d" % i for i in range(49,57): From f6b5a0ca2bb186bcf4af414235f2fd6001019db7 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Fri, 13 Aug 2021 18:10:38 +0800 Subject: [PATCH 099/117] [fanout] Update errdisable detect cause from link-flap to link-change (#3711) On later version of EOS, command "no errdisable detect cause link-flap" has been deprecated. On older version of EOS, both link-flap and link-change are supported. However, they are mutually exclusive. This change replaced the "no errdisable detect cause link-flap" command with the more future proof "no errdisable detect cause link-change". 
Signed-off-by: Xin Wang --- ansible/roles/fanout/templates/arista_7060_deploy.j2 | 3 ++- ansible/roles/fanout/templates/arista_7260_deploy.j2 | 3 ++- ansible/roles/fanout/templates/arista_7260cx3_deploy.j2 | 5 +++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/ansible/roles/fanout/templates/arista_7060_deploy.j2 b/ansible/roles/fanout/templates/arista_7060_deploy.j2 index 02350185af1..b1dd68a5e5b 100644 --- a/ansible/roles/fanout/templates/arista_7060_deploy.j2 +++ b/ansible/roles/fanout/templates/arista_7060_deploy.j2 @@ -1,5 +1,6 @@ ! no errdisable detect cause link-flap +no errdisable detect cause link-change ! no schedule tech-support ! @@ -87,7 +88,7 @@ interface {{ intf }} ! interface Management 1 description TO LAB MGMT SWITCH - ip address {{ device_info[inventory_hostname]["ManagementIp"] }} + ip address {{ device_info[inventory_hostname]["ManagementIp"] }} no shutdown ! # LACP packets pass through diff --git a/ansible/roles/fanout/templates/arista_7260_deploy.j2 b/ansible/roles/fanout/templates/arista_7260_deploy.j2 index 93dc411f66f..600bf6dcf0f 100644 --- a/ansible/roles/fanout/templates/arista_7260_deploy.j2 +++ b/ansible/roles/fanout/templates/arista_7260_deploy.j2 @@ -3,6 +3,7 @@ ! ! no errdisable detect cause link-flap +no errdisable detect cause link-change ! no schedule tech-support ! @@ -54,7 +55,7 @@ interface {{ intf }} ! interface Management 1 description TO LAB MGMT SWITCH - ip address {{ device_info[inventory_hostname]["ManagementIp"] }} + ip address {{ device_info[inventory_hostname]["ManagementIp"] }} no shutdown ! # LACP packets pass through diff --git a/ansible/roles/fanout/templates/arista_7260cx3_deploy.j2 b/ansible/roles/fanout/templates/arista_7260cx3_deploy.j2 index 56bb5b2731f..314a315df53 100644 --- a/ansible/roles/fanout/templates/arista_7260cx3_deploy.j2 +++ b/ansible/roles/fanout/templates/arista_7260cx3_deploy.j2 @@ -1,5 +1,6 @@ ! no errdisable detect cause link-flap +no errdisable detect cause link-change ! 
no schedule tech-support ! @@ -60,7 +61,7 @@ vrf definition management speed forced 10gfull no shutdown ! -{% endfor %} +{% endfor %} {% elif device_conn[inventory_hostname][intf]['speed'] == "50000" %} interface {{ intf }} description {{ device_conn[inventory_hostname][intf]['peerdevice'] }}-{{ device_conn[inventory_hostname][intf]['peerport'] }} @@ -110,7 +111,7 @@ interface {{ intf }} ! interface Management 1 description TO LAB MGMT SWITCH - ip address {{ device_info[inventory_hostname]["ManagementIp"] }} + ip address {{ device_info[inventory_hostname]["ManagementIp"] }} no shutdown ! # LACP packets pass through From 8788d4061a1e2d8eb8d48ff6724c0567eec13844 Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Fri, 13 Aug 2021 14:19:14 -0700 Subject: [PATCH 100/117] remove the use of config_db to get the port indicies (#4012) The PR #3897 introduced a change to get the port indices from config_db. This causes some test regressions for single ASIC testbeds. The following change are done in this commit: - The port_index_map for single asic and multi asic will not use the index in config_db - For multi-asic, the indicies will be generated by adding a asic_offset to make sure the index is different for every asic. 
Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- ansible/library/config_facts.py | 34 ++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py index 568ccf38862..5152715e60b 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -2,6 +2,12 @@ import json from collections import defaultdict from natsort import natsorted +from ansible.module_utils.port_utils import get_port_indices_for_asic + +try: + from sonic_py_common import multi_asic +except ImportError: + print("Failed to import multi_asic") DOCUMENTATION = ''' --- @@ -61,7 +67,7 @@ def format_config(json_data): return res -def create_maps(config): +def create_maps(config, namespace): """ Create a map of SONiC port name to physical port index """ port_index_map = {} port_name_to_alias_map = {} @@ -71,13 +77,19 @@ def create_maps(config): port_name_list = config["PORT"].keys() port_name_list_sorted = natsorted(port_name_list) - #get the port_index from config_db if available - port_index_map = { - name: int(v['index']) - 1 - for name, v in config['PORT'].items() - if 'index' in v - } - if not port_index_map: + try: + multi_asic_device = multi_asic.is_multi_asic() + except Exception: + multi_asic_device = False + + + if multi_asic_device: + asic_id = 0 + if namespace is not None: + asic_id = namespace.split("asic")[1] + port_index_map = get_port_indices_for_asic(asic_id, + port_name_list_sorted) + else: #if not available generate an index for idx, val in enumerate(port_name_list_sorted): port_index_map[val] = idx @@ -105,7 +117,7 @@ def get_running_config(module, namespace): return json_info -def get_facts(config): +def get_facts(config, namespace): """ Create the facts dict """ Tree = lambda: defaultdict(Tree) @@ -114,7 +126,7 @@ def get_facts(config): results.update(format_config(config)) - results.update(create_maps(config)) + results.update(create_maps(config, 
namespace)) return results @@ -146,7 +158,7 @@ def main(): config = json.load(f) elif m_args["source"] == "running": config = get_running_config(module, namespace) - results = get_facts(config) + results = get_facts(config, namespace) module.exit_json(ansible_facts=results) except Exception as e: module.fail_json(msg=e.message) From 2202f1be9fd877a01898e633ef9c99fb74369289 Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Fri, 13 Aug 2021 14:22:14 -0700 Subject: [PATCH 101/117] [chassis] use unicode to get ipaddress (#4011) What is the motivation for this PR? For the supervisor card, while generating the minigraph the following error is seen fatal: [str2-sonic-sup-1]: FAILED! => {"changed": false, "msg": "failed to find the correct fabric asic info '20.0.0.1' (len 8 != 4) is not permitted as an IPv4 address. Did you pass in a bytes (str in Python 2) instead of a unicode object?"} How did you do it? pass unicode instead of str to ipaddress.IPv4Address Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- ansible/library/fabric_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/library/fabric_info.py b/ansible/library/fabric_info.py index 1b14caed4fc..7b7442c203c 100644 --- a/ansible/library/fabric_info.py +++ b/ansible/library/fabric_info.py @@ -48,8 +48,8 @@ def main(): num_fabric_asic = int( m_args[ 'num_fabric_asic' ] ) v4pfx = str( m_args[ 'asics_host_basepfx' ] ).split("/") v6pfx = str( m_args[ 'asics_host_basepfx6' ] ).split("/") - v4base = int( ipaddress.IPv4Address(v4pfx[0]) ) - v6base = int( ipaddress.IPv6Address(v6pfx[0]) ) + v4base = int( ipaddress.IPv4Address(unicode(v4pfx[0])) ) + v6base = int( ipaddress.IPv6Address(unicode(v6pfx[0])) ) for asic_id in range(num_fabric_asic): key = "ASIC%d" % asic_id next_v4addr = str( ipaddress.IPv4Address(v4base + asic_id) ) From ad40fb1500606600ea9a951ed30c3e5b5c41d1f1 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Thu, 12 Aug 2021 
19:13:06 -0700 Subject: [PATCH 102/117] Add ip route format that's present in Bullseye In Bullseye, ip route includes the next hop ID in the output if it's set/available. Add a regex to handle that. Signed-off-by: Saikrishna Arcot --- tests/common/devices/sonic.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 3ca368f9edb..1185c1c2794 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -851,6 +851,13 @@ def get_ip_route_info(self, dstip, ns=""): nexthop via 10.0.0.5 dev PortChannel0002 weight 1 nexthop via 10.0.0.9 dev PortChannel0003 weight 1 nexthop via 10.0.0.13 dev PortChannel0004 weight 1 + +raw data (starting from Bullseye) +192.168.8.0/25 nhid 296 proto bgp src 10.1.0.32 metric 20 + nexthop via 10.0.0.57 dev PortChannel0001 weight 1 + nexthop via 10.0.0.59 dev PortChannel0002 weight 1 + nexthop via 10.0.0.61 dev PortChannel0003 weight 1 + nexthop via 10.0.0.63 dev PortChannel0004 weight 1 ---------------- get_ip_route_info(ipaddress.ip_address(unicode("20c0:a818::"))) returns {'set_src': IPv6Address(u'fc00:1::32'), 'nexthops': [(IPv6Address(u'fc00::1a'), u'PortChannel0004')]} @@ -866,6 +873,13 @@ def get_ip_route_info(self, dstip, ns=""): 20c0:a818::/64 via fc00::a dev PortChannel0002 proto 186 src fc00:1::32 metric 20 pref medium 20c0:a818::/64 via fc00::12 dev PortChannel0003 proto 186 src fc00:1::32 metric 20 pref medium 20c0:a818::/64 via fc00::1a dev PortChannel0004 proto 186 src fc00:1::32 metric 20 pref medium + +raw data (starting from Bullseye) +20c0:a818::/64 nhid 224 proto bgp src fc00:1::32 metric 20 pref medium + nexthop via fc00::72 dev PortChannel0001 weight 1 + nexthop via fc00::76 dev PortChannel0002 weight 1 + nexthop via fc00::7a dev PortChannel0003 weight 1 + nexthop via fc00::7e dev PortChannel0004 weight 1 ---------------- get_ip_route_info(ipaddress.ip_network(unicode("0.0.0.0/0"))) returns {'set_src': 
IPv4Address(u'10.1.0.32'), 'nexthops': [(IPv4Address(u'10.0.0.1'), u'PortChannel0001'), (IPv4Address(u'10.0.0.5'), u'PortChannel0002'), (IPv4Address(u'10.0.0.9'), u'PortChannel0003'), (IPv4Address(u'10.0.0.13'), u'PortChannel0004')]} @@ -876,6 +890,13 @@ def get_ip_route_info(self, dstip, ns=""): nexthop via 10.0.0.5 dev PortChannel0002 weight 1 nexthop via 10.0.0.9 dev PortChannel0003 weight 1 nexthop via 10.0.0.13 dev PortChannel0004 weight 1 + +raw data (starting from Bullseye) +default nhid 296 proto bgp src 10.1.0.32 metric 20 + nexthop via 10.0.0.57 dev PortChannel0001 weight 1 + nexthop via 10.0.0.59 dev PortChannel0002 weight 1 + nexthop via 10.0.0.61 dev PortChannel0003 weight 1 + nexthop via 10.0.0.63 dev PortChannel0004 weight 1 ---------------- get_ip_route_info(ipaddress.ip_network(unicode("::/0"))) returns {'set_src': IPv6Address(u'fc00:1::32'), 'nexthops': [(IPv6Address(u'fc00::2'), u'PortChannel0001'), (IPv6Address(u'fc00::a'), u'PortChannel0002'), (IPv6Address(u'fc00::12'), u'PortChannel0003'), (IPv6Address(u'fc00::1a'), u'PortChannel0004')]} @@ -885,6 +906,13 @@ def get_ip_route_info(self, dstip, ns=""): default via fc00::a dev PortChannel0002 proto 186 src fc00:1::32 metric 20 pref medium default via fc00::12 dev PortChannel0003 proto 186 src fc00:1::32 metric 20 pref medium default via fc00::1a dev PortChannel0004 proto 186 src fc00:1::32 metric 20 pref medium + +raw data (starting from Bullseye) +default nhid 224 proto bgp src fc00:1::32 metric 20 pref medium + nexthop via fc00::72 dev PortChannel0001 weight 1 + nexthop via fc00::76 dev PortChannel0002 weight 1 + nexthop via fc00::7a dev PortChannel0003 weight 1 + nexthop via fc00::7e dev PortChannel0004 weight 1 ---------------- """ @@ -904,10 +932,13 @@ def get_ip_route_info(self, dstip, ns=""): # parse set_src m = re.match(r"^(default|\S+) proto (zebra|bgp|186) src (\S+)", rt[0]) m1 = re.match(r"^(default|\S+) via (\S+) dev (\S+) proto (zebra|bgp|186) src (\S+)", rt[0]) + m2 = 
re.match(r"^(default|\S+) nhid (\d+) proto (zebra|bgp|186) src (\S+)", rt[0]) if m: rtinfo['set_src'] = ipaddress.ip_address(unicode(m.group(3))) elif m1: rtinfo['set_src'] = ipaddress.ip_address(unicode(m1.group(5))) + elif m2: + rtinfo['set_src'] = ipaddress.ip_address(unicode(m2.group(4))) # parse nexthops for l in rt: From fbde132548d44c76428e42942d02db296519769e Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Fri, 13 Aug 2021 19:42:10 -0700 Subject: [PATCH 103/117] Fix config_facts.py not storing data in certain cases In some cases, depending on the order in which dictionary items get processed, some data that belongs to a single-level key can end up not actually being copied into `data` (the formatted dictionary that's then used by other test scripts). This was actually happening with the VLAN_INTERFACE table where `proxy_arp` was present in sonic-cfggen, but was silently being dropped here. Fix this issue, and fix a test script that would get broken because there are non-IP addresses present now. This was seen with Python 3, where there is a different order in which dictionary items ended up getting processed. 
Signed-off-by: Saikrishna Arcot --- ansible/library/config_facts.py | 5 ++++- tests/arp/test_arp_dualtor.py | 11 +++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py index 568ccf38862..8feb943d2d9 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -54,7 +54,10 @@ def format_config(json_data): data.setdefault(key_l1, {})[key_l2] = entry except ValueError: # This is a single level key - data.setdefault(key, entry) + if key not in data: + data[key] = entry + else: + data[key].update(entry) res.setdefault(table, data) diff --git a/tests/arp/test_arp_dualtor.py b/tests/arp/test_arp_dualtor.py index a96cd78e578..449978881dc 100644 --- a/tests/arp/test_arp_dualtor.py +++ b/tests/arp/test_arp_dualtor.py @@ -25,10 +25,13 @@ def setup_ptf_arp(config_facts, ptfhost, intfs_for_test): intf_ipv4_addr = None for addr in vlan_addrs: - if type(ip_network(addr, strict=False)) is IPv6Network: - intf_ipv6_addr = ip_network(addr, strict=False) - elif type(ip_network(addr, strict=False)) is IPv4Network: - intf_ipv4_addr = ip_network(addr, strict=False) + try: + if type(ip_network(addr, strict=False)) is IPv6Network: + intf_ipv6_addr = ip_network(addr, strict=False) + elif type(ip_network(addr, strict=False)) is IPv4Network: + intf_ipv4_addr = ip_network(addr, strict=False) + except ValueError: + continue # The VLAN interface on the DUT has an x.x.x.1 address assigned (or x::1 in the case of IPv6) # But the network_address property returns an x.x.x.0 address (or x::0 for IPv6) so we increment by two to avoid conflict From f1b3876f7ef7b39a69e1a5f769e1744fed49499d Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Sat, 14 Aug 2021 07:48:55 -0700 Subject: [PATCH 104/117] [kvmtest]: add route/test_static_route.py to topo t0 (#4038) Signed-off-by: Guohan Lu --- tests/kvmtest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh index 
a26f659aaf1..e0ea31454d8 100755 --- a/tests/kvmtest.sh +++ b/tests/kvmtest.sh @@ -112,6 +112,7 @@ test_t0() { pc/test_po_cleanup.py \ pc/test_po_update.py \ route/test_default_route.py \ + route/test_static_route.py \ arp/test_neighbor_mac.py \ arp/test_neighbor_mac_noptf.py \ snmp/test_snmp_cpu.py \ From 2dd78b0f33cde1f36cd4fb2dc8fc6efcdb763b9a Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Sat, 14 Aug 2021 07:59:13 -0700 Subject: [PATCH 105/117] [dualtor] Fix incorrect FDB used count in test_standby_tor_upstream_mux_toggle (#4042) Summary: Use server MAC instead of default MAC to send packet. This is to avoid extra use of FDB resources and prevent test failure due to that. #### What is the motivation for this PR? Failure seen in `test_standby_tor_upstream_mux_toggle`, where test is expecting no increase in count for CRM resources (fdb_entry), but there is ONE extra fdb_entry ``` > pt_assert(len(unmatched_crm_facts)==0, 'Unmatched CRM facts: {}'.format(json.dumps(unmatched_crm_facts, indent=4))) E Failed: Unmatched CRM facts: [ E { E "right": { E "fdb_entry": { E "available": 32742, E "used": 25 E } E }, E "left": { E "fdb_entry": { E "available": 32743, E "used": 24 E } E } E } E ] PKT_NUM = 100 crm_facts0 = {'acl_group': [{'available count': '996', 'bind point': 'PORT', 'resource name': 'acl_group', 'stage': 'INGRESS', ...}...'used': 24}, 'ipmc_entry': {'available': 8192, 'used': 0}, 'ipv4_neighbor': {'available': 16352, 'used': 4}, ...}, ...} crm_facts1 = {'acl_group': [{'available count': '996', 'bind point': 'PORT', 'resource name': 'acl_group', 'stage': 'INGRESS', ...}...'used': 25}, 'ipmc_entry': {'available': 8192, 'used': 0}, 'ipv4_neighbor': {'available': 16352, 'used': 4}, ...}, ...} ip = {'server_ipv4': '192.168.0.6/32', 'server_ipv6': 'fc02:1000::6/128', 'state': 'auto'} itfs = 'Ethernet20' ptfadapter = rand_selected_dut = str2-7050cx3-acs-01 rand_selected_interface = ('Ethernet20', {'server_ipv4': '192.168.0.6/32', 'server_ipv6': 
'fc02:1000::6/128', 'state': 'auto'}) require_mocked_dualtor = None set_crm_polling_interval = None tbinfo = {'auto_recover': 'True', 'comment': 'lawlee', 'conf-name': 'vms20-t0-7050cx3-1', 'duts': ['str2-7050cx3-acs-01'], ...} toggle_all_simulator_ports = unmatched_crm_facts = [{'left': {'fdb_entry': {'available': 32743, 'used': 24}}, 'right': {'fdb_entry': {'available': 32742, 'used': 25}}}] dualtor/test_standby_tor_upstream_mux_toggle.py:106: Failed ``` #### How did you do it? The extra FDB entry seems to be created by the test when a packet is send without src_address. If `simple_ip_packet` does not receive a src MAC as an argument, the default is set to `00:06:07:08:09:0A` (https://github.com/p4lang/ptf/blob/master/src/ptf/testutils.py#L1756) ``` admin@str2-7050cx3-acs-01:~$ fdbshow No. Vlan MacAddress Port Type ----- ------ ----------------- ---------- ------- 1 1000 98:03:9B:03:22:12 Ethernet72 Dynamic 2 1000 98:03:9B:03:22:07 Ethernet28 Dynamic 3 1000 98:03:9B:03:22:08 Ethernet32 Dynamic 4 1000 98:03:9B:03:22:18 Ethernet96 Dynamic 5 1000 98:03:9B:03:22:02 Ethernet8 Dynamic 6 1000 98:03:9B:03:22:17 Ethernet92 Dynamic 7 1000 98:03:9B:03:22:16 Ethernet88 Dynamic 8 1000 98:03:9B:03:22:0B Ethernet44 Dynamic 9 1000 98:03:9B:03:22:0D Ethernet52 Dynamic 10 1000 98:03:9B:03:22:14 Ethernet80 Dynamic 11 1000 98:03:9B:03:22:0F Ethernet60 Dynamic 12 1000 98:03:9B:03:22:13 Ethernet76 Dynamic 13 1000 98:03:9B:03:22:10 Ethernet64 Dynamic 14 1000 98:03:9B:03:22:09 Ethernet36 Dynamic 15 1000 98:03:9B:03:22:05 Ethernet20 Dynamic <<<---------- SERVER MAC 16 1000 98:03:9B:03:22:15 Ethernet84 Dynamic 17 1000 98:03:9B:03:22:03 Ethernet12 Dynamic 18 1000 98:03:9B:03:22:04 Ethernet16 Dynamic 19 1000 98:03:9B:03:22:01 Ethernet4 Dynamic 20 1000 98:03:9B:03:22:11 Ethernet68 Dynamic 21 1000 98:03:9B:03:22:0A Ethernet40 Dynamic 22 1000 98:03:9B:03:22:06 Ethernet24 Dynamic 23 1000 00:06:07:08:09:0A Ethernet20 Dynamic <<<---------- TEST MAC 24 1000 98:03:9B:03:22:0C Ethernet48 Dynamic 25 
1000 98:03:9B:03:22:0E Ethernet56 Dynamic Total number of entries 25 admin@str2-7050cx3-acs-01:~$ ``` This is THAT extra MAC which uses FDB resources, and fails the test. However, this is not totally incorrect behavior, but it can be improved if `simple_ip_packet` is called with SRC MAC of the server which is attached to the VLAN port. So, this change finds the MAC of the ptf port, and uses it to create and send upstream packets. --- tests/common/dualtor/dual_tor_utils.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index c6b598101cb..c122ad7d4de 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -807,8 +807,12 @@ def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, pkt_num = vlan_name = list(vlan_table.keys())[0] vlan_mac = host.get_dut_iface_mac(vlan_name) router_mac = host.facts['router_mac'] + mg_facts = host.get_extended_minigraph_facts(tbinfo) + tx_port = mg_facts['minigraph_ptf_indices'][itfs] + eth_src = ptfadapter.dataplane.get_mac(0, tx_port) # Generate packets from server to a random IP address, which goes default routes - pkt = testutils.simple_ip_packet(eth_dst=vlan_mac, + pkt = testutils.simple_ip_packet(eth_src=eth_src, + eth_dst=vlan_mac, ip_src=server_ip, ip_dst=random_ip) # Generate packet forwarded to portchannels @@ -837,8 +841,6 @@ def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, pkt_num = rx_ports += v rx_ports = [int(x.strip('eth')) for x in rx_ports] - mg_facts = host.get_extended_minigraph_facts(tbinfo) - tx_port = mg_facts['minigraph_ptf_indices'][itfs] logger.info("Verifying upstream traffic. 
packet number = {} interface = {} server_ip = {} expect_drop = {}".format(pkt_num, itfs, server_ip, drop)) for i in range(0, pkt_num): ptfadapter.dataplane.flush() From 88283b35f8ce123a4c968142dbe4a42bc006231c Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Sat, 14 Aug 2021 09:02:17 -0700 Subject: [PATCH 106/117] [dualtor] Remove acl-drop verification from mux_toggle testcase (#4041) Removed drop counter verification from the test as the counters will not be increased in this case. --- .../test_standby_tor_upstream_mux_toggle.py | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py index 5a29f855122..10b0f67c9b6 100644 --- a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py +++ b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py @@ -1,8 +1,6 @@ import pytest import logging -import ipaddress import json -import re import time from tests.common.dualtor.dual_tor_mock import * from tests.common.helpers.assertions import pytest_assert as pt_assert @@ -21,22 +19,6 @@ PAUSE_TIME = 10 -def get_l2_rx_drop(host, itfs): - """ - Return L2 rx packet drop counter for given interface - """ - res = {} - stdout = host.shell("portstat -j")['stdout'] - match = re.search("Last cached time was.*\n", stdout) - if match: - stdout = re.sub("Last cached time was.*\n", "", stdout) - data = json.loads(stdout) - return int(data[itfs]['RX_DRP']) - - -def clear_portstat(dut): - dut.shell("portstat -c") - @pytest.fixture(scope='module', autouse=True) def test_cleanup(rand_selected_dut): @@ -67,11 +49,6 @@ def test_standby_tor_upstream_mux_toggle( drop=True) time.sleep(5) - # Verify dropcounter is increased - drop_counter = get_l2_rx_drop(rand_selected_dut, itfs) - pt_assert(drop_counter >= PKT_NUM, - "RX_DRP for {} is expected to increase by {} actually {}".format(itfs, PKT_NUM, drop_counter)) - # Step 2. 
Toggle mux state to active, and verify traffic is not dropped by ACL and fwd-ed to uplinks; verify CRM show and no nexthop objects are stale set_mux_state(rand_selected_dut, tbinfo, 'active', [itfs], toggle_all_simulator_ports) # Wait sometime for mux toggle @@ -97,10 +74,6 @@ def test_standby_tor_upstream_mux_toggle( server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, drop=True) - # Verify dropcounter is increased - drop_counter = get_l2_rx_drop(rand_selected_dut, itfs) - pt_assert(drop_counter >= PKT_NUM, - "RX_DRP for {} is expected to increase by {} actually {}".format(itfs, PKT_NUM, drop_counter)) crm_facts1 = rand_selected_dut.get_crm_facts() unmatched_crm_facts = compare_crm_facts(crm_facts0, crm_facts1) pt_assert(len(unmatched_crm_facts)==0, 'Unmatched CRM facts: {}'.format(json.dumps(unmatched_crm_facts, indent=4))) From e1ba4b2e233ee114f856daeb8f7642ba743fdbaa Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Sun, 15 Aug 2021 00:03:47 +0800 Subject: [PATCH 107/117] Add skip_vendor_specific_container argument option for test_monitoring_critical_processe (#4007) When there are some vendor specific container installed in DUT, after run test_monitoring_critical_processes case, it will raise some error logs like " Process 'wjhd' is not running in namespace 'host'.* ". So we add skip_vendor_specific_container option to skip the specified container check. 
--- tests/process_monitoring/conftest.py | 28 +++++++++++++++++++ .../test_critical_process_monitoring.py | 3 +- 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 tests/process_monitoring/conftest.py diff --git a/tests/process_monitoring/conftest.py b/tests/process_monitoring/conftest.py new file mode 100644 index 00000000000..a8b7372f810 --- /dev/null +++ b/tests/process_monitoring/conftest.py @@ -0,0 +1,28 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--skip_vendor_specific_container", + action="store", + default="", + required=False, + help="skip vendor specific container list" + ) + + +@pytest.fixture(scope="module", autouse=True) +def skip_vendor_specific_container(request): + """ This fixture is to get the skipping vendor container list and return the container information + + For example: + pytest --skip_vendor_specific_container "container1, container2" + pytest --skip_vendor_specific_container container1, container2 + + """ + skip_vendor_specific_container_opt = request.config.getoption("--skip_vendor_specific_container", default="") + vendor_specific_container_list = [] + if skip_vendor_specific_container_opt: + vendor_specific_container_list = [container.strip() for container in skip_vendor_specific_container_opt.split(",")] + + return vendor_specific_container_list diff --git a/tests/process_monitoring/test_critical_process_monitoring.py b/tests/process_monitoring/test_critical_process_monitoring.py index d1d411c66bb..f0ad919f1fa 100755 --- a/tests/process_monitoring/test_critical_process_monitoring.py +++ b/tests/process_monitoring/test_critical_process_monitoring.py @@ -518,7 +518,7 @@ def ensure_all_critical_processes_running(duthost, containers_in_namespaces): ensure_process_is_running(duthost, container_name_in_namespace, program_name) -def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo): +def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo, 
skip_vendor_specific_container): """Tests the feature of monitoring critical processes by Monit and Supervisord. This function will check whether names of critical processes will appear @@ -551,6 +551,7 @@ def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo): # Skip 'radv' container on devices whose role is not T0. if tbinfo["topo"]["type"] != "t0": skip_containers.append("radv") + skip_containers = skip_containers + skip_vendor_specific_container containers_in_namespaces = get_containers_namespace_ids(duthost, skip_containers) From d1bd0fcc1c57b5889d68da5307f7d396988772c2 Mon Sep 17 00:00:00 2001 From: "Nana@Nvidia" <78413612+nhe-NV@users.noreply.github.com> Date: Sun, 15 Aug 2021 01:16:23 +0800 Subject: [PATCH 108/117] [test_bgpmon.py]Increase the wait time for the bgp mon session to come up (#3924) --- tests/bgp/test_bgpmon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/bgp/test_bgpmon.py b/tests/bgp/test_bgpmon.py index a05c9c2216b..ba37aed2470 100644 --- a/tests/bgp/test_bgpmon.py +++ b/tests/bgp/test_bgpmon.py @@ -144,7 +144,7 @@ def bgpmon_peer_connected(duthost, bgpmon_peer): try: pytest_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT), "Failed to start bgp monitor session on PTF") - pytest_assert(wait_until(30, 5, bgpmon_peer_connected, duthost, peer_addr),"BGPMon Peer connection not established") + pytest_assert(wait_until(180, 5, bgpmon_peer_connected, duthost, peer_addr),"BGPMon Peer connection not established") finally: ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent") ptfhost.shell("ip route del %s dev %s" % (local_addr + "/32", ptf_interface)) From 04353a73fcd2de9b3049b23d30237586a4d26531 Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Sat, 14 Aug 2021 14:42:05 -0700 Subject: [PATCH 109/117] Fix duplicate module name (#4044) Duplicate module name introduced as part of #3998 is causing fanoutHost object creation failure --- 
tests/common/devices/sonic.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index cd99fddde4e..28bd682b18b 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -10,7 +10,7 @@ from collections import defaultdict from datetime import datetime -from ansible import constants +from ansible import constants as ansible_constants from ansible.plugins.loader import connection_loader from tests.common.devices.base import AnsibleHostBase @@ -48,10 +48,10 @@ def __init__(self, ansible_adhoc, hostname, # parse connection options and reset those options with # passed credentials connection_loader.get(sonic_conn, class_only=True) - user_def = constants.config.get_configuration_definition( + user_def = ansible_constants.config.get_configuration_definition( "remote_user", "connection", sonic_conn ) - pass_def = constants.config.get_configuration_definition( + pass_def = ansible_constants.config.get_configuration_definition( "password", "connection", sonic_conn ) for user_var in (_['name'] for _ in user_def['vars']): From 88edc514cbce20857407d060d5e48e77b388df58 Mon Sep 17 00:00:00 2001 From: Anton Ptashnik Date: Sun, 15 Aug 2021 09:44:45 +0300 Subject: [PATCH 110/117] test_tor_ecn: added a precondition skipping the test when a helper script is missing (#4009) some test_tor_ecn tests require a helper script write_standby.py which can be missing and no docs provided on it, so added a skip when the script is missing --- tests/dualtor/test_tor_ecn.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/dualtor/test_tor_ecn.py b/tests/dualtor/test_tor_ecn.py index 80cac3b18a4..ecc7515b0bf 100644 --- a/tests/dualtor/test_tor_ecn.py +++ b/tests/dualtor/test_tor_ecn.py @@ -259,6 +259,18 @@ def test_dscp_to_queue_during_decap_on_active( else: logging.info("the expected Queue : {} matching with received Queue : {}".format(exp_queue, 
rec_queue)) +@pytest.fixture(scope='module') +def write_standby(rand_selected_dut): + file = "/usr/local/bin/write_standby.py" + def runcmd(): + rand_selected_dut.shell(file) + + try: + rand_selected_dut.shell("ls %s" % file) + return runcmd + except: + pytest.skip('file {} not found'.format(file)) + def test_dscp_to_queue_during_encap_on_standby( build_non_encapsulated_ip_packet, rand_selected_interface, ptfadapter, @@ -266,12 +278,13 @@ def test_dscp_to_queue_during_encap_on_standby( rand_selected_dut, tunnel_traffic_monitor, duthosts, - rand_one_dut_hostname + rand_one_dut_hostname, + write_standby ): """ Test if DSCP to Q mapping for outer header is matching with inner header during encap on standby """ - rand_selected_dut.shell("/usr/local/bin/write_standby.py") + write_standby() tor = rand_selected_dut non_encapsulated_packet = build_non_encapsulated_ip_packet @@ -325,12 +338,13 @@ def test_ecn_during_encap_on_standby( ptfadapter, tbinfo, rand_selected_dut, - tunnel_traffic_monitor + tunnel_traffic_monitor, + write_standby ): """ Test if the ECN stamping on outer header is matching with inner during encap on standby """ - rand_selected_dut.shell("/usr/local/bin/write_standby.py") + write_standby() tor = rand_selected_dut non_encapsulated_packet = build_non_encapsulated_ip_packet From 909c7b3ed655271f8149f4d009a559aad9596eab Mon Sep 17 00:00:00 2001 From: slutati1536 <69785882+slutati1536@users.noreply.github.com> Date: Sun, 15 Aug 2021 11:31:41 +0300 Subject: [PATCH 111/117] Do neighbor VM restore only if there's VM's (#4015) Execute the logic of neighbor_vm_restore only in cases where vm_neighbors is not an empty list. Summary: Function neighbor_vm_restore is run during sanity in cases of failure in the setup. However in setups that do not include VM's this function is causing errors in the test. It is more correct to run the recover logic only if the vm_neighbors list is not empty. that way we could avoided such errors and the function is more correct. 
- How did you do it? Add an if statement to function - How did you verify/test it? I run the tests with sanity afterward and verified they passed - Any platform specific information? It's relevant to all platforms --- tests/common/plugins/sanity_check/recover.py | 27 ++++++++++---------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/common/plugins/sanity_check/recover.py b/tests/common/plugins/sanity_check/recover.py index 2ee3c8973e8..100caeef30c 100644 --- a/tests/common/plugins/sanity_check/recover.py +++ b/tests/common/plugins/sanity_check/recover.py @@ -109,16 +109,17 @@ def neighbor_vm_restore(duthost, nbrhosts, tbinfo): logger.info("Restoring neighbor VMs for {}".format(duthost)) mg_facts = duthost.get_extended_minigraph_facts(tbinfo) vm_neighbors = mg_facts['minigraph_neighbors'] - lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] - - for lag_name in lag_facts['names']: - nbr_intf = lag_facts['lags'][lag_name]['po_config']['ports'].keys()[0] - peer_device = vm_neighbors[nbr_intf]['name'] - nbr_host = nbrhosts[peer_device]['host'] - intf_list = nbrhosts[peer_device]['conf']['interfaces'].keys() - # restore interfaces and portchannels - for intf in intf_list: - nbr_host.no_shutdown(intf) - asn = nbrhosts[peer_device]['conf']['bgp']['asn'] - # restore BGP session - nbr_host.no_shutdown_bgp(asn) + if vm_neighbors: + lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] + + for lag_name in lag_facts['names']: + nbr_intf = lag_facts['lags'][lag_name]['po_config']['ports'].keys()[0] + peer_device = vm_neighbors[nbr_intf]['name'] + nbr_host = nbrhosts[peer_device]['host'] + intf_list = nbrhosts[peer_device]['conf']['interfaces'].keys() + # restore interfaces and portchannels + for intf in intf_list: + nbr_host.no_shutdown(intf) + asn = nbrhosts[peer_device]['conf']['bgp']['asn'] + # restore BGP session + nbr_host.no_shutdown_bgp(asn) From 
67266b9ebbe8945f090e01c7a7224b879a74cd76 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Mon, 16 Aug 2021 04:19:04 +0300 Subject: [PATCH 112/117] [portstat] [telemetry] [snmp] Align tests to delay of flex counters change (#3903) ### Approach #### What is the motivation for this PR? Following PR's: https://github.com/Azure/sonic-swss/pull/1803 https://github.com/Azure/sonic-swss/pull/1804 Flex counters are delayed and these tests are failing as a result of missing information on the DUT. This PR is to wait until all counters are enabled before running the test. Timeout value chosen by the delay script which can be found here: https://github.com/Azure/sonic-buildimage/blob/master/dockers/docker-orchagent/enable_counters.py #### How did you do it? Wait until all counters are enabled before running the tests. #### How did you verify/test it? Run the tests with this change. --- tests/conftest.py | 24 ++++++++++++++++++++++++ tests/portstat/test_portstat.py | 2 +- tests/snmp/conftest.py | 4 ++++ tests/telemetry/test_telemetry.py | 2 +- 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 7b38fd05fb9..48c7ca1f43e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -36,6 +36,8 @@ from tests.common.connections.console_host import ConsoleHost +WAIT_FOR_COUNTERS_TIMEOUT = 190 +WAIT_FOR_COUNTERS_INTERVAL = 10 logger = logging.getLogger(__name__) cache = FactsCache() @@ -1143,3 +1145,25 @@ def duts_minigraph_facts(duthosts, tbinfo): } """ return duthosts.get_extended_minigraph_facts(tbinfo) + +@pytest.fixture(scope='function') +def wait_for_counters(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + logger.info('Wait until all counters are enabled on the DUT') + counters = ['PORT_STAT', 'PORT_BUFFER_DROP', 'QUEUE_STAT', 'PG_WATERMARK_STAT', 'RIF_STAT'] + current_attempt = 0 + while current_attempt < WAIT_FOR_COUNTERS_TIMEOUT / 
WAIT_FOR_COUNTERS_INTERVAL: + output = duthost.shell("counterpoll show | sed '1,2d'", module_ignore_errors=True) + assert output.has_key('rc') and output['rc'] == 0, "Failed to get counters status" + counters_lines = output['stdout'].splitlines() + enabled_counters = 0 + for line in counters_lines: + if any(counter in line for counter in counters) and 'enable' in line: + enabled_counters += 1 + continue + if enabled_counters == len(counters): + return + else: + current_attempt += 1 + time.sleep(WAIT_FOR_COUNTERS_INTERVAL) + assert False, "Not all counters are enabled after {} seconds".format(WAIT_FOR_COUNTERS_TIMEOUT) diff --git a/tests/portstat/test_portstat.py b/tests/portstat/test_portstat.py index b156396abed..3607205de15 100644 --- a/tests/portstat/test_portstat.py +++ b/tests/portstat/test_portstat.py @@ -98,7 +98,7 @@ def reset_portstat(duthosts, enum_rand_one_per_hwsku_frontend_hostname): @pytest.mark.parametrize('command', ['portstat -c', 'portstat --clear']) -def test_portstat_clear(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command): +def test_portstat_clear(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command, wait_for_counters): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] wait(30, 'Wait for DUT to receive/send some packets') before_portstat = parse_portstat(duthost.command('portstat')['stdout_lines']) diff --git a/tests/snmp/conftest.py b/tests/snmp/conftest.py index 3877404cc47..8dec106f9d0 100644 --- a/tests/snmp/conftest.py +++ b/tests/snmp/conftest.py @@ -6,6 +6,10 @@ def setup_check_snmp_ready(duthosts): for duthost in duthosts: assert wait_until(300, 20, duthost.is_service_fully_started, "snmp"), "SNMP service is not running" +@pytest.fixture(scope="function", autouse=True) +def snmp_wait_for_counters(wait_for_counters): + return + def pytest_addoption(parser): """ Adds options to pytest that are used by the snmp tests. 
diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index d0e7344c300..8318940e39f 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -153,7 +153,7 @@ def test_telemetry_enabledbydefault(duthosts, rand_one_dut_hostname): status_expected = "enabled"; pytest_assert(str(v) == status_expected, "Telemetry feature is not enabled") -def test_telemetry_ouput(duthosts, rand_one_dut_hostname, ptfhost, setup_streaming_telemetry, localhost): +def test_telemetry_ouput(duthosts, rand_one_dut_hostname, ptfhost, setup_streaming_telemetry, localhost, wait_for_counters): """Run pyclient from ptfdocker and show gnmi server outputself. """ duthost = duthosts[rand_one_dut_hostname] From 143fb4a79d89d2fff9c7a6e2ccd55c4ac74d20c3 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Mon, 16 Aug 2021 14:01:35 +0800 Subject: [PATCH 113/117] Add test script for null_route_helper (#3812) * Add test script for null_route_helper Signed-off-by: bingwang --- tests/acl/null_route/acl.json | 272 ++++++++++++++++++ .../acl/null_route/test_null_route_helper.py | 181 ++++++++++++ 2 files changed, 453 insertions(+) create mode 100644 tests/acl/null_route/acl.json create mode 100644 tests/acl/null_route/test_null_route_helper.py diff --git a/tests/acl/null_route/acl.json b/tests/acl/null_route/acl.json new file mode 100644 index 00000000000..0e03fa9dc27 --- /dev/null +++ b/tests/acl/null_route/acl.json @@ -0,0 +1,272 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "NULL_ROUTE_ACL_TABLE_V4": { + "acl-entries": { + "acl-entry": { + "1": { + "config": { + "sequence-id": 1 + }, + "actions": { + "config": { + "forwarding-action": "DROP" + } + }, + "ip": { + "config": { + "destination-ip-address": "9.5.9.3/32" + } + } + }, + "2": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 2 + }, + "ip": { + "config": { + "protocol": "IP_TCP", + 
"destination-ip-address": "10.2.1.2/32" + } + }, + "transport": { + "config": { + "destination-port": "22" + } + } + }, + "3": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 3 + }, + "ip": { + "config": { + "protocol": "IP_TCP", + "destination-ip-address": "10.2.1.2/32" + } + }, + "transport": { + "config": { + "destination-port": "443" + } + } + }, + "4": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 4 + }, + "ip": { + "config": { + "protocol": "IP_UDP" + } + }, + "transport": { + "config": { + "destination-port": "67" + } + } + }, + "5": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 5 + }, + "ip": { + "config": { + "protocol": "IP_ICMP" + } + } + }, + "6": { + "config": { + "sequence-id": 6 + }, + "actions": { + "config": { + "forwarding-action": "DROP" + } + }, + "ip": { + "config": { + "destination-ip-address": "10.2.1.2/32" + } + } + }, + "9998": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 9998 + }, + "ip": { + "config": { + "source-ip-address": "0.0.0.0/0", + "destination-ip-address": "0.0.0.0/0" + } + } + } + } + }, + "config": { + "name": "NULL_ROUTE_ACL_TABLE_V4" + } + }, + "NULL_ROUTE_ACL_TABLE_V6": { + "acl-entries": { + "acl-entry": { + "1": { + "config": { + "sequence-id": 1 + }, + "actions": { + "config": { + "forwarding-action": "DROP" + } + }, + "ip": { + "config": { + "destination-ip-address": "103:23:2:1::1/128" + } + } + }, + "2": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 2 + }, + "ip": { + "config": { + "protocol": "IP_TCP", + "destination-ip-address": "103:23:2:1::1/128" + } + }, + "transport": { + "config": { + "destination-port": "22" + } + } + }, + "3": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 3 
+ }, + "ip": { + "config": { + "protocol": "IP_TCP", + "destination-ip-address": "103:23:2:1::1/128" + } + }, + "transport": { + "config": { + "destination-port": "443" + } + } + }, + "4": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 4 + }, + "ip": { + "config": { + "protocol": "IP_UDP" + } + }, + "transport": { + "config": { + "destination-port": "67" + } + } + }, + "5": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 5 + }, + "ip": { + "config": { + "protocol": "IP_ICMP" + } + } + }, + "6": { + "config": { + "sequence-id": 6 + }, + "actions": { + "config": { + "forwarding-action": "DROP" + } + }, + "ip": { + "config": { + "destination-ip-address": "103:23:2:1::1/128" + } + } + }, + "9998": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 9998 + }, + "ip": { + "config": { + "source-ip-address": "::/0", + "destination-ip-address": "::/0" + } + } + } + } + }, + "config": { + "name": "NULL_ROUTE_ACL_TABLE_V6" + } + } + } + } + } +} diff --git a/tests/acl/null_route/test_null_route_helper.py b/tests/acl/null_route/test_null_route_helper.py new file mode 100644 index 00000000000..657d405a187 --- /dev/null +++ b/tests/acl/null_route/test_null_route_helper.py @@ -0,0 +1,181 @@ +import ipaddress +import logging +import random +import pytest +import os +import time + +from ptf.mask import Mask +import ptf.packet as scapy + +from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] +import ptf.testutils as testutils + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology("t0") +] + +ACL_JSON_FILE_SRC = "acl/null_route/acl.json" +ACL_JSON_FILE_DEST = "/host/" + os.path.basename(ACL_JSON_FILE_SRC) + +ACL_TABLE_NAME_V4 = "NULL_ROUTE_ACL_TABLE_V4" +ACL_TABLE_NAME_V6 = "NULL_ROUTE_ACL_TABLE_V6" + +NULL_ROUTE_HELPER = "null_route_helper" + +DST_IP = { + 4: 
"192.168.0.2", + 6: "fc02:1000::2" +} + +FORWARD = "FORWARD" +DROP = "DROP" + +TEST_DATA = [ + # src_ip, action, expected_result + ("1.2.3.4", "", FORWARD), # Should be forwared in default + ("fc03:1001::1", "", FORWARD), # Should be forwared in default + + ("1.2.3.4", "block {} 1.2.3.4".format(ACL_TABLE_NAME_V4), DROP), # Verify block ipv4 without prefix len + ("1.2.3.4", "unblock {} 1.2.3.4/32".format(ACL_TABLE_NAME_V4), FORWARD), # Verify unblock ipv4 with prefix len + ("1.2.3.4", "block {} 1.2.3.4/32".format(ACL_TABLE_NAME_V4), DROP), # Verify block ipv4 with prefix len + ("1.2.3.4", "block {} 1.2.3.4/32".format(ACL_TABLE_NAME_V4), DROP), # Verify double-block dosen't cause issue + ("1.2.3.4", "unblock {} 1.2.3.4/32".format(ACL_TABLE_NAME_V4), FORWARD), # Verify unblock ipv4 with prefix len + ("1.2.3.4", "unblock {} 1.2.3.4/32".format(ACL_TABLE_NAME_V4), FORWARD), # Verify double-unblock doesn't cause issue + + ("fc03:1000::1", "block {} fc03:1000::1".format(ACL_TABLE_NAME_V6), DROP), # Verify block ipv6 without prefix len + ("fc03:1000::1", "unblock {} fc03:1000::1/128".format(ACL_TABLE_NAME_V6), FORWARD), # Verify unblock ipv6 with prefix len + ("fc03:1000::1", "block {} fc03:1000::1/128".format(ACL_TABLE_NAME_V6), DROP), # Verify block ipv6 with prefix len + ("fc03:1000::1", "block {} fc03:1000::1/128".format(ACL_TABLE_NAME_V6), DROP), # Verify double-block dosen't cause issue + ("fc03:1000::1", "unblock {} fc03:1000::1/128".format(ACL_TABLE_NAME_V6), FORWARD), # Verify unblock ipv4 with prefix len + ("fc03:1000::1", "unblock {} fc03:1000::1/128".format(ACL_TABLE_NAME_V6), FORWARD), # Verify double-unblock doesn't cause issue +] + + +@pytest.fixture(scope="module") +def create_acl_table(rand_selected_dut, tbinfo): + """ + Create two ACL tables on DUT for testing. 
+ """ + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + # Get the list of LAGs + port_channels = ",".join(mg_facts["minigraph_portchannels"].keys()) + cmds = [ + "config acl add table {} L3 -p {}".format(ACL_TABLE_NAME_V4, port_channels), + "config acl add table {} L3V6 -p {}".format(ACL_TABLE_NAME_V6, port_channels) + ] + rand_selected_dut.shell_cmds(cmds=cmds) + yield + + cmds= [ + "config acl remove table {}".format(ACL_TABLE_NAME_V4), + "config acl remove table {}".format(ACL_TABLE_NAME_V6) + ] + rand_selected_dut.shell_cmds(cmds=cmds) + + +@pytest.fixture(scope="module") +def apply_pre_defined_rules(rand_selected_dut, create_acl_table): + """ + This is to apply some ACL rules as production does + """ + rand_selected_dut.copy(src=ACL_JSON_FILE_SRC, dest=ACL_JSON_FILE_DEST) + rand_selected_dut.shell("acl-loader update full " + ACL_JSON_FILE_DEST) + yield + # Clear ACL rules + rand_selected_dut.shell('sonic-db-cli CONFIG_DB keys "ACL_RULE|{}*" | xargs sonic-db-cli CONFIG_DB del'.format(ACL_TABLE_NAME_V4)) + rand_selected_dut.shell('sonic-db-cli CONFIG_DB keys "ACL_RULE|{}*" | xargs sonic-db-cli CONFIG_DB del'.format(ACL_TABLE_NAME_V6)) + + +@pytest.fixture(scope="module") +def setup_ptf(rand_selected_dut, ptfhost, tbinfo): + """ + Add ipv4 and ipv6 address to a port on ptf. 
+ """ + dst_ports = {} + vlan_name = "" + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + for vlan_info in mg_facts["minigraph_vlan_interfaces"]: + ip_ver = ipaddress.ip_network(vlan_info['addr'], False).version + dst_ports[ip_ver] = str(ipaddress.ip_address(vlan_info['addr']) + 1) + '/' + str(vlan_info['prefixlen']) + vlan_name = vlan_info['attachto'] + + vlan_port = mg_facts['minigraph_vlans'][vlan_name]['members'][0] + dst_ports['port'] = mg_facts['minigraph_ptf_indices'][vlan_port] + + logger.info("Setting up ptf for testing") + ptfhost.shell("ifconfig eth{} {}".format(dst_ports['port'], dst_ports[4])) + ptfhost.shell("ifconfig eth{} inet6 add {}".format(dst_ports['port'], dst_ports[6])) + + yield dst_ports + ptfhost.shell("ifconfig eth{} 0.0.0.0".format(dst_ports['port'])) + ptfhost.shell("ifconfig eth{} inet6 del {}".format(dst_ports['port'], dst_ports[6])) + + +def generate_packet(src_ip, dst_ip, dst_mac): + """ + Build ipv4 and ipv6 packets/expected_packets for testing. + """ + if ipaddress.ip_network(unicode(src_ip), False).version == 4: + pkt = testutils.simple_ip_packet(eth_dst=dst_mac, ip_src=src_ip, ip_dst=dst_ip) + exp_pkt = Mask(pkt) + exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") + exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") + exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") + exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + else: + pkt = testutils.simple_tcpv6_packet(eth_dst=dst_mac, ipv6_src=src_ip, ipv6_dst=dst_ip) + exp_pkt = Mask(pkt) + exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") + exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") + exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim") + + return pkt, exp_pkt + + +def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action): + """ + Send packet with ptfadapter and verify if packet is forwarded or dropped as expected. 
+ """ + ptfadapter.dataplane.flush() + testutils.send_packet(ptfadapter, pkt = pkt, port_id=tx_port) + rcvd = testutils.count_matched_packets(ptfadapter, exp_pkt, rx_port) + if expected_action == FORWARD: + return rcvd == 1 + else: + return rcvd == 0 + + +def test_null_route_helper(rand_selected_dut, tbinfo, ptfadapter, apply_pre_defined_rules, setup_ptf): + """ + Test case to verify script null_route_helper. + Some packets are generated as defined in TEST_DATA and sent to DUT, and verify if packet is forwarded or dropped as expected. + """ + ptf_port_info = setup_ptf + rx_port = ptf_port_info['port'] + router_mac = rand_selected_dut.facts["router_mac"] + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + portchannel_members = [] + for _, v in mg_facts["minigraph_portchannels"].items(): + portchannel_members += v['members'] + + ptf_t1_interfaces = [] + for port in portchannel_members: + ptf_t1_interfaces.append(mg_facts['minigraph_ptf_indices'][port]) + + # Run testing as defined in TEST_DATA + for test_item in TEST_DATA: + src_ip = test_item[0] + action = test_item[1] + expected_result = test_item[2] + ip_ver = ipaddress.ip_network(unicode(src_ip), False).version + logger.info("Testing with src_ip = {} action = {} expected_result = {}" + .format(src_ip, action, expected_result)) + pkt, exp_pkt = generate_packet(src_ip, DST_IP[ip_ver], router_mac) + if action != "": + rand_selected_dut.shell(NULL_ROUTE_HELPER + " " + action) + time.sleep(1) + + assert(send_and_verify_packet(ptfadapter, pkt, exp_pkt, random.choice(ptf_t1_interfaces), rx_port, expected_result)) From 88c6b37d6f1274792b489f5f0d15d61b5c001bc7 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 16 Aug 2021 10:34:43 +0300 Subject: [PATCH 114/117] [warm-reboot][sad] enhance test to cover different port types and oper state in sad scenarios (#3853) ### Description of PR Summary: Enhance warm reboot sad path test cases. 
Fixes https://github.com/Azure/sonic-mgmt/issues/3683 ### Type of change - [x] Test case(new/improvement) ### Approach #### What is the motivation for this PR? The motivation is to fill the test gap to test different ports admin and operationally down before warm-reboot and check their status after warm-reboot. Sanity will make sure the ports changed are back up. #### How did you do it? - For port down cases implemented a special logic for port selection based on port physical properties. - For port down cases implemented also port operational state change. - As of second enhancement in the list, a necessary refactoring was made: a few sad operations were taken out of the ptf script and implemented in pytest. The old sad path wasn't changed, to keep ansible compatibility. This is a partial refactoring only, enabling us to implement the first two items from this list. Other sad operations need to be moved to pytest as well in the future. #### How did you verify/test it? Running warm_reboot_multi_sad.
--- .../test/files/ptftests/advanced-reboot.py | 18 +- tests/common/devices/sonic.py | 2 + tests/common/fixtures/advanced_reboot.py | 129 ++++---- tests/common/helpers/sad_path.py | 280 ++++++++++++++++++ tests/platform_tests/test_advanced_reboot.py | 95 +++--- 5 files changed, 423 insertions(+), 101 deletions(-) create mode 100644 tests/common/helpers/sad_path.py diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py index e278089dc12..3f40108c776 100644 --- a/ansible/roles/test/files/ptftests/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/advanced-reboot.py @@ -156,6 +156,7 @@ def __init__(self): self.check_param('vnet_pkts', None, required=False) self.check_param('target_version', '', required=False) self.check_param('bgp_v4_v6_time_diff', 40, required=False) + self.check_param('logfile_suffix', None, required=False) if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None': self.test_params['preboot_oper'] = None if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None': @@ -167,9 +168,14 @@ def __init__(self): else: self.sad_oper = self.test_params['inboot_oper'] - if self.sad_oper: - self.log_file_name = '/tmp/%s-%s.log' % (self.test_params['reboot_type'], self.sad_oper) - self.report_file_name = '/tmp/%s-%s.json' % (self.test_params['reboot_type'], self.sad_oper) + if self.test_params['logfile_suffix']: + self.logfile_suffix = self.test_params['logfile_suffix'] + else: + self.logfile_suffix = self.sad_oper + + if self.logfile_suffix: + self.log_file_name = '/tmp/%s-%s.log' % (self.test_params['reboot_type'], self.logfile_suffix) + self.report_file_name = '/tmp/%s-%s.json' % (self.test_params['reboot_type'], self.logfile_suffix) else: self.log_file_name = '/tmp/%s.log' % self.test_params['reboot_type'] self.report_file_name = '/tmp/%s-report.json' % self.test_params['reboot_type'] @@ -354,7 +360,7 @@ def 
generate_arp_responder_conf(self, vlan_host_map): def dump_arp_responder_config(self, dump): # save data for arp_replay process - filename = "/tmp/from_t1.json" if self.sad_oper is None else "/tmp/from_t1_%s.json" % self.sad_oper + filename = "/tmp/from_t1.json" if self.logfile_suffix is None else "/tmp/from_t1_%s.json" % self.logfile_suffix with open(filename, "w") as fp: json.dump(dump, fp) @@ -1356,7 +1362,7 @@ def sniff_in_background(self, wait = None): self.sniffer_started.clear() def save_sniffed_packets(self): - filename = "/tmp/capture_%s.pcap" % self.sad_oper if self.sad_oper is not None else "/tmp/capture.pcap" + filename = "/tmp/capture_%s.pcap" % self.logfile_suffix if self.logfile_suffix is not None else "/tmp/capture.pcap" if self.packets: scapyall.wrpcap(filename, self.packets) self.log("Pcap file dumped to %s" % filename) @@ -1510,7 +1516,7 @@ def examine_flow(self, filename = None): self.log("Gaps in forwarding not found.") self.log("Total incoming packets captured %d" % received_counter) if packets: - filename = '/tmp/capture_filtered.pcap' if self.sad_oper is None else "/tmp/capture_filtered_%s.pcap" % self.sad_oper + filename = '/tmp/capture_filtered.pcap' if self.logfile_suffix is None else "/tmp/capture_filtered_%s.pcap" % self.logfile_suffix scapyall.wrpcap(filename, packets) self.log("Filtered pcap dumped to %s" % filename) diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 28bd682b18b..d21ecf8512a 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -794,6 +794,7 @@ def shutdown(self, ifname): Args: ifname: the interface to shutdown """ + logging.info("Shutting down {}".format(ifname)) return self.command("sudo config interface shutdown {}".format(ifname)) def shutdown_multiple(self, ifnames): @@ -813,6 +814,7 @@ def no_shutdown(self, ifname): Args: ifname: the interface to bring up """ + logging.info("Starting up {}".format(ifname)) return self.command("sudo config interface 
startup {}".format(ifname)) def no_shutdown_multiple(self, ifnames): diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py index ff2da553e26..60f0e594f53 100644 --- a/tests/common/fixtures/advanced_reboot.py +++ b/tests/common/fixtures/advanced_reboot.py @@ -1,3 +1,4 @@ +import copy import ipaddress import itertools import json @@ -8,6 +9,7 @@ from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice from tests.common.platform.ssh_utils import prepare_testbed_ssh_keys as prepareTestbedSshKeys from tests.common.reboot import reboot as rebootDut +from tests.common.helpers.sad_path import SadOperation from tests.ptf_runner import ptf_runner logger = logging.getLogger(__name__) @@ -199,6 +201,10 @@ def __validateAndBuildSadList(self): sadList = [item for item in itertools.chain(prebootList, inbootList)] for item in sadList: + # TODO: Move all sad path logic out of ptf script to pytest. + # Once done, we can make a sad_operation fixture. + if isinstance(item, SadOperation): + continue if ':' not in item: continue itemCnt = int(item.split(':')[-1]) @@ -212,16 +218,6 @@ def __validateAndBuildSadList(self): 'Lag count is greater than or equal to number of VM hosts. ' 'Current val = {0} Max val = {1}' ).format(itemCnt, self.hostMaxLen) - if 'lag_member_down' in item: - assert itemCnt <= self.lagMemberCnt, ( - 'Lag member count is greater than available number of lag members. ' - 'Current val = {0} Available cnt = {1}' - ).format(itemCnt, self.lagMemberCnt) - if 'vlan_port_down' in item: - assert itemCnt <= self.vlanMaxCnt, ( - 'Vlan count is greater than or equal to number of Vlan interfaces. ' - 'Current val = {0} Max val = {1}' - ).format(itemCnt, self.vlanMaxCnt) if 'routing' in item: assert itemCnt <= self.hostMaxCnt, ( 'Number of prefixes is greater than allowed max. ' @@ -330,15 +326,6 @@ def __setupTestbed(self): ''' Sets testbed up. 
It tranfers test data files, ARP responder, and runs script to update IPs and MAC addresses. ''' - testDataFiles = [ - {'source' : self.mgFacts['minigraph_portchannels'], 'name' : 'portchannel_interfaces'}, - {'source' : self.mgFacts['minigraph_vlans'], 'name' : 'vlan_interfaces' }, - {'source' : self.mgFacts['minigraph_ptf_indices'], 'name' : 'ports' }, - {'source' : self.mgFacts['minigraph_devices'], 'name' : 'peer_dev_info' }, - {'source' : self.mgFacts['minigraph_neighbors'], 'name' : 'neigh_port_info' }, - ] - self.__transferTestDataFiles(testDataFiles, self.ptfhost) - self.__runScript(['remove_ip.sh'], self.ptfhost) self.__prepareTestbedSshKeys() @@ -441,11 +428,14 @@ def runRebootTest(self): for rebootOper in self.rebootData['sadList']: count += 1 try: + self.__setupRebootOper(rebootOper) result = self.__runPtfRunner(rebootOper) + self.__verifyRebootOper(rebootOper) finally: # always capture the test logs self.__fetchTestLogs(rebootOper) self.__clearArpAndFdbTables() + self.__revertRebootOper(rebootOper) if not result: return result if len(self.rebootData['sadList']) > 1 and count != len(self.rebootData['sadList']): @@ -463,6 +453,38 @@ def runRebootTestcase(self, prebootList=None, inbootList=None, prebootFiles=None self.imageInstall(prebootList, inbootList, prebootFiles) return self.runRebootTest() + def __setupRebootOper(self, rebootOper): + testData = { + 'portchannel_interfaces': copy.deepcopy(self.mgFacts['minigraph_portchannels']), + 'vlan_interfaces': copy.deepcopy(self.mgFacts['minigraph_vlans']), + 'ports': copy.deepcopy(self.mgFacts['minigraph_ptf_indices']), + 'peer_dev_info': copy.deepcopy(self.mgFacts['minigraph_devices']), + 'neigh_port_info': copy.deepcopy(self.mgFacts['minigraph_neighbors']), + } + + if isinstance(rebootOper, SadOperation): + logger.info('Running setup handler for reboot operation {}'.format(rebootOper)) + rebootOper.setup(testData) + + # TODO: remove this parameter. Arista VMs can be read by ptf from peer_dev_info. 
+ self.rebootData['arista_vms'] = [ + attr['mgmt_addr'] for dev, attr in testData['peer_dev_info'].items() if attr['hwsku'] == 'Arista-VM' + ] + self.hostMaxLen = len(self.rebootData['arista_vms']) - 1 + + testDataFiles = [{'source': source, 'name': name} for name, source in testData.items()] + self.__transferTestDataFiles(testDataFiles, self.ptfhost) + + def __verifyRebootOper(self, rebootOper): + if isinstance(rebootOper, SadOperation): + logger.info('Running verify handler for reboot operation {}'.format(rebootOper)) + rebootOper.verify() + + def __revertRebootOper(self, rebootOper): + if isinstance(rebootOper, SadOperation): + logger.info('Running revert handler for reboot operation {}'.format(rebootOper)) + rebootOper.revert() + def __runPtfRunner(self, rebootOper=None): ''' Run single PTF advanced-reboot.ReloadTest @@ -470,12 +492,43 @@ def __runPtfRunner(self, rebootOper=None): ''' logger.info("Running PTF runner on PTF host: {0}".format(self.ptfhost)) - # Non-routing neighbor/dut lag/bgp, vlan port up/down operation is performed before dut reboot process - # lack of routing indicates it is preboot operation - prebootOper = rebootOper if rebootOper is not None and 'routing' not in rebootOper else None - # Routing add/remove is performed during dut reboot process - # presence of routing in reboot operation indicates it is during reboot operation (inboot) - inbootOper = rebootOper if rebootOper is not None and 'routing' in rebootOper else None + params={ + "dut_username" : self.rebootData['dut_username'], + "dut_password" : self.rebootData['dut_password'], + "dut_hostname" : self.rebootData['dut_hostname'], + "reboot_limit_in_seconds" : self.rebootLimit, + "reboot_type" : self.rebootType, + "portchannel_ports_file" : self.rebootData['portchannel_interfaces_file'], + "vlan_ports_file" : self.rebootData['vlan_interfaces_file'], + "ports_file" : self.rebootData['ports_file'], + "dut_mac" : self.rebootData['dut_mac'], + "default_ip_range" : 
self.rebootData['default_ip_range'], + "vlan_ip_range" : self.rebootData['vlan_ip_range'], + "lo_v6_prefix" : self.rebootData['lo_v6_prefix'], + "arista_vms" : self.rebootData['arista_vms'], + "nexthop_ips" : self.rebootData['nexthop_ips'], + "allow_vlan_flooding" : self.allowVlanFlooding, + "sniff_time_incr" : self.sniffTimeIncr, + "setup_fdb_before_test" : True, + "vnet" : self.vnet, + "vnet_pkts" : self.vnetPkts, + "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff + } + + if not isinstance(rebootOper, SadOperation): + # Non-routing neighbor/dut lag/bgp, vlan port up/down operation is performed before dut reboot process + # lack of routing indicates it is preboot operation + prebootOper = rebootOper if rebootOper is not None and 'routing' not in rebootOper else None + # Routing add/remove is performed during dut reboot process + # presence of routing in reboot operation indicates it is during reboot operation (inboot) + inbootOper = rebootOper if rebootOper is not None and 'routing' in rebootOper else None + params.update({ + "preboot_files" : self.prebootFiles, + "preboot_oper" : prebootOper, + "inboot_oper" : inbootOper, + }) + else: + params.update({'logfile_suffix': str(rebootOper)}) self.__updateAndRestartArpResponder(rebootOper) @@ -487,31 +540,7 @@ def __runPtfRunner(self, rebootOper=None): qlen=PTFRUNNER_QLEN, platform_dir="ptftests", platform="remote", - params={ - "dut_username" : self.rebootData['dut_username'], - "dut_password" : self.rebootData['dut_password'], - "dut_hostname" : self.rebootData['dut_hostname'], - "reboot_limit_in_seconds" : self.rebootLimit, - "reboot_type" : self.rebootType, - "portchannel_ports_file" : self.rebootData['portchannel_interfaces_file'], - "vlan_ports_file" : self.rebootData['vlan_interfaces_file'], - "ports_file" : self.rebootData['ports_file'], - "dut_mac" : self.rebootData['dut_mac'], - "default_ip_range" : self.rebootData['default_ip_range'], - "vlan_ip_range" : self.rebootData['vlan_ip_range'], - "lo_v6_prefix" : 
self.rebootData['lo_v6_prefix'], - "arista_vms" : self.rebootData['arista_vms'], - "preboot_files" : self.prebootFiles, - "preboot_oper" : prebootOper, - "inboot_oper" : inbootOper, - "nexthop_ips" : self.rebootData['nexthop_ips'], - "allow_vlan_flooding" : self.allowVlanFlooding, - "sniff_time_incr" : self.sniffTimeIncr, - "setup_fdb_before_test" : True, - "vnet" : self.vnet, - "vnet_pkts" : self.vnetPkts, - "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff - }, + params=params, log_file=u'/tmp/advanced-reboot.ReloadTest.log', module_ignore_errors=self.moduleIgnoreErrors ) diff --git a/tests/common/helpers/sad_path.py b/tests/common/helpers/sad_path.py new file mode 100644 index 00000000000..e61cf4570c3 --- /dev/null +++ b/tests/common/helpers/sad_path.py @@ -0,0 +1,280 @@ +""" Module contains sad path operations. """ + +import datetime +import logging + +from itertools import groupby, chain, islice + +from tests.common.platform.device_utils import fanout_switch_port_lookup + +logger = logging.getLogger(__name__) + + +class SadOperation(object): + """ SadOperation interface class. """ + + def setup(self, test_data): + """ Perform sad path setup operations and modify the test_data + passed to PTF script accordingly. """ + raise NotImplementedError + + # TODO: split verification into to phases - preboot verify and postboot verify. + # Currently there is no verification in sad_path.py done prior to warm-reboot. + # So it could be a race when sad operation takes a while to affect the testbed + # but we are doing warm-reboot prior to that. Currently the preparation in + # advanced-reboot.py is long enough so that won't happen. + def verify(self): + """ Verify handler that runs after warm-reboot completes. + Checks sad path operataions result after warm-reboot. """ + raise NotImplementedError + + def revert(self): + """ Revert changes done in setup. 
""" + raise NotImplementedError + + +class Selector(object): + """ Selector interface provides a select() method + to choose test objects from the input list. """ + + def select(self, objlist): + """ Choose test objects from objlist. """ + raise NotImplementedError + + +class PhyPropsPortSelector(Selector): + """ Select the port based on physical port settings. """ + + def __init__(self, duthost, count): + self.duthost = duthost + self.count = count + + def select(self, objlist): + port_table = self.duthost.get_running_config_facts()["PORT"] + + def group_func(port): + _, attrs = port + width = len(attrs["lanes"].split(",")) + speed = attrs.get("speed") + fec = attrs.get("fec") + + return width, speed, fec + + # For ports passed to this method group them by width, speed and fec + # and choose number of ports taking each from a different group. + # If is greater then the number of groups start over till we fill + # the output list with the number of ports requested. + # Assertion is raised when there are no enough ports. + port_items = [(name, attrs) for name, attrs in port_table.items() if name in objlist] + assert len(port_items) >= self.count, "No enough ports to test, required at least {}".format(self.count) + groups = [list(group) for _, group in groupby(sorted(port_items, key=group_func), key=group_func)] + return [name for name, _ in islice(chain.from_iterable(zip(*groups)), self.count)] + + +class DatetimeSelector(Selector): + """ Select from list based on current datetime. """ + + def __init__(self, count): + self.count = count + + def select(self, objlist): + assert len(objlist) >= self.count, "Not enough elements, required at least {}".format(self.count) + # Get some start index and select items from the list + # starting from index till the end, if the amount is less then + # self.count it will fill the rest starting from the beginning of + # the list. 
+ index = datetime.datetime.now().day % len(objlist) + selected = (objlist[index:] + objlist[:index])[:self.count] + return selected + + +class VlanMemberDown(SadOperation): + """ Base class for vlan member down scenarios. """ + + def __init__(self, duthost, port_selector): + self.duthost = duthost + self.ports = port_selector.select(duthost.get_vlan_intfs()) + + def setup(self, test_data): + vlans = test_data["vlan_interfaces"] + # Exclude down vlan members + for vlan in vlans.values(): + vlan["members"] = list(set(vlan["members"]) - set(self.ports)) + + +class DutVlanMemberDown(VlanMemberDown): + """ Sad path test case to verify warm-reboot when vlan member port goes administartively down. """ + + def __init__(self, duthost, port_selector): + super(DutVlanMemberDown, self).__init__(duthost, port_selector) + logger.info("Selected ports for DUT vlan member down case {}".format(self.ports)) + + def setup(self, test_data): + super(DutVlanMemberDown, self).setup(test_data) + self.duthost.shutdown_multiple(self.ports) + + def verify(self): + facts = self.duthost.show_interface(command="status", interfaces=self.ports) + port_facts = facts["ansible_facts"]["int_status"] + assert all([port["admin_state"] == "down" for port in port_facts.values()]) + + def revert(self): + self.duthost.no_shutdown_multiple(self.ports) + + def __str__(self): + return "vlan_port_down:{}".format(len(self.ports)) + + +class NeighVlanMemberDown(VlanMemberDown): + """ Sad path test case to verify warm-reboot when vlan member port goes operationaly down + by shutting down the corresponding port on the neighbor side. 
""" + + def __init__(self, duthost, fanouthosts, port_selector): + super(NeighVlanMemberDown, self).__init__(duthost, port_selector) + self.fanouthosts = fanouthosts + logger.info("Selected ports for neighbor vlan member down case {}".format(self.ports)) + + def setup(self, test_data): + super(NeighVlanMemberDown, self).setup(test_data) + + for port in self.ports: + fanout, fanport = fanout_switch_port_lookup(self.fanouthosts, self.duthost.hostname, port) + fanout.shutdown(fanport) + + def verify(self): + facts = self.duthost.show_interface(command="status", interfaces=self.ports) + port_facts = facts["ansible_facts"]["int_status"] + assert all([port["oper_state"] == "down" for port in port_facts.values()]) + + def revert(self): + for port in self.ports: + fanout, fanport = fanout_switch_port_lookup(self.fanouthosts, self.duthost.hostname, port) + fanout.no_shutdown(fanport) + + def __str__(self): + return "neigh_vlan_member_down:{}".format(len(self.ports)) + + +class LagMemberDown(SadOperation): + """ Base class for LAG member down sad path scenarios. """ + + def __init__(self, duthost, nbrhosts, vm_selector, port_selector): + super(LagMemberDown, self).__init__() + vms = {vm: nbrhosts[vm] for vm in vm_selector.select(list(nbrhosts))} + mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + dut_port_to_neighbor = mg_facts["minigraph_neighbors"] + lags = mg_facts["minigraph_portchannels"] + + # Build neighbor hostname to DUT LAG mapping and + # DUT port to DUT LAG mapping. 
+ neigh_to_lag = {} + port_to_lag = {} + for dut_port, neigh_info in dut_port_to_neighbor.items(): + for lag in lags.values(): + if dut_port in lag["members"]: + neigh_to_lag[neigh_info["name"]] = lag + port_to_lag[dut_port] = lag + + ports = [] + for vm in vms: + ports.extend(port_selector.select(neigh_to_lag[vm]["members"])) + + self.vms = vms + self.ports = ports + self.duthost = duthost + self.nbrhosts = nbrhosts + self.neigh_to_lag = neigh_to_lag + self.port_to_lag = port_to_lag + + def setup(self, test_data): + lags = test_data["portchannel_interfaces"] + peer_dev_info = test_data["peer_dev_info"] + + # Exclude down LAG members + for lag in lags.values(): + lag["members"] = list(set(lag["members"]) - set(self.ports)) + + # Exclude VMs corresponding to down LAGs + for vm in self.vms: + peer_dev_info.pop(vm) + + def verify(self): + lag_facts = self.duthost.lag_facts(host=self.duthost.hostname)["ansible_facts"]["lag_facts"] + port_facts = self.duthost.show_interface(command="status")["ansible_facts"]["int_status"] + + for port in self.ports: + lag = self.port_to_lag[port] + port_stats = lag_facts["lags"][lag["name"]]["po_stats"]["ports"][port] + assert not port_stats["runner"]["aggregator"]["selected"] + + for vm in self.vms: + assert port_facts[self.neigh_to_lag[vm]["name"]]["oper_state"] == "down" + nbrhost = self.nbrhosts[vm]["host"] + # TODO: remove this hardcode, implement a mapping of DUT LAG to VM LAG. + nbr_lag_name = "Port-Channel1" + commands = ["show interface {} | json".format(nbr_lag_name)] + output = nbrhost.eos_command(commands=commands)["stdout"][0] + state = output["interfaces"][nbr_lag_name]["interfaceStatus"] + assert state in ["notconnect"] + + +class DutLagMemberDown(LagMemberDown): + """ Sad path to test warm-reboot when LAG member on DUT is shutdown + and verify that after warm-reboot LAG member state is still down on DUT and neighbor. 
""" + + def __init__(self, duthost, nbrhosts, vm_selector, port_selector): + super(DutLagMemberDown, self).__init__(duthost, nbrhosts, vm_selector, port_selector) + logger.info("Selected ports for DUT LAG member down case {}".format(self.ports)) + + def setup(self, test_data): + super(DutLagMemberDown, self).setup(test_data) + self.duthost.shutdown_multiple(self.ports) + + def revert(self): + self.duthost.no_shutdown_multiple(self.ports) + + def __str__(self): + return "dut_lag_member_down:{}:{}".format(len(self.vms), len(self.ports)) + + +class NeighLagMemberDown(LagMemberDown): + """ Sad path to test warm-reboot when LAG member on neighbor is shutdown + and verify that after warm-reboot LAG member state is still down on DUT and neighbor. """ + + def __init__(self, duthost, nbrhosts, fanouthosts, vm_selector, port_selector): + super(NeighLagMemberDown, self).__init__(duthost, nbrhosts, vm_selector, port_selector) + logger.info("Selected ports for neighbor LAG member down case {}".format(self.ports)) + + mg_facts = self.duthost.minigraph_facts(host=self.duthost.hostname)["ansible_facts"] + mg_neighs = mg_facts["minigraph_neighbors"].items() + + self.fanouthosts = fanouthosts + self.dut_port_to_nbr = {port: nbr_info["name"] for port, nbr_info in mg_neighs} + self.dut_port_to_nbr_port = {port: nbr_info["port"] for port, nbr_info in mg_neighs} + + def setup(self, test_data): + super(NeighLagMemberDown, self).setup(test_data) + self._change_ports_state(bring_up=False) + + def revert(self): + self._change_ports_state(bring_up=True) + + def _change_ports_state(self, bring_up): + for port in self.ports: + nbrname = self.dut_port_to_nbr[port] + nbrport = self.dut_port_to_nbr_port[port] + nbrhost = self.nbrhosts[nbrname]["host"] + if bring_up: + nbrhost.no_shutdown(nbrport) + else: + nbrhost.shutdown(nbrport) + + fanout, fanport = fanout_switch_port_lookup(self.fanouthosts, self.duthost.hostname, port) + if bring_up: + fanout.no_shutdown(fanport) + else: + 
fanout.shutdown(fanport) + + + def __str__(self): + return "neigh_lag_member_down:{}:{}".format(len(self.vms), len(self.ports)) diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py index 4da98e65e21..6a9cf8f65c7 100644 --- a/tests/platform_tests/test_advanced_reboot.py +++ b/tests/platform_tests/test_advanced_reboot.py @@ -6,6 +6,15 @@ from tests.platform_tests.verify_dut_health import verify_dut_health # lgtm[py/unused-import] from tests.platform_tests.verify_dut_health import add_fail_step_to_reboot # lgtm[py/unused-import] +from tests.common.helpers.sad_path import ( + DutVlanMemberDown, + NeighVlanMemberDown, + DutLagMemberDown, + NeighLagMemberDown, + PhyPropsPortSelector, + DatetimeSelector, +) + pytestmark = [ pytest.mark.disable_loganalyzer, pytest.mark.topology('t0') @@ -68,13 +77,11 @@ def test_cancelled_warm_reboot(request, add_fail_step_to_reboot, verify_dut_heal ### Tetcases to verify reboot procedure with SAD cases ### def test_warm_reboot_sad(request, get_advanced_reboot, verify_dut_health, - backup_and_restore_config_db, advanceboot_neighbor_restore): + backup_and_restore_config_db, advanceboot_neighbor_restore, + duthost, fanouthosts, nbrhosts): ''' Warm reboot with sad path - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. 
- For non lag member cases, this parameter will be skipped - @param request: Spytest commandline argument @param get_advanced_reboot: advanced reboot test fixture ''' @@ -84,9 +91,14 @@ def test_warm_reboot_sad(request, get_advanced_reboot, verify_dut_health, 'dut_bgp_down', # Shutdown single BGP session on DUT brefore rebooting it 'dut_lag_down', # Shutdown single LAG session on DUT brefore rebooting it 'neigh_lag_down', # Shutdown single LAG session on remote device (VM) before reboot DUT - 'dut_lag_member_down:1:1', # Shutdown 1 LAG member corresponding to 1 remote device (VM) on DUT - 'neigh_lag_member_down:1:1', # Shutdown 1 LAG member on 1 remote device (VM) - 'vlan_port_down', # Shutdown 1 vlan port (interface) on DUT + # Shutdown 1 LAG member corresponding to 1 remote device (VM) on DUT + DutLagMemberDown(duthost, nbrhosts, DatetimeSelector(1), PhyPropsPortSelector(duthost, 1)), + # Shutdown 1 LAG member on 1 remote device (VM) + NeighLagMemberDown(duthost, nbrhosts, fanouthosts, DatetimeSelector(1), PhyPropsPortSelector(duthost, 1)), + # Shutdown 1 vlan port (interface) on DUT + DutVlanMemberDown(duthost, PhyPropsPortSelector(duthost, 1)), + # Shutdown 1 vlan port (interface) on fanout + NeighVlanMemberDown(duthost, fanouthosts, PhyPropsPortSelector(duthost, 1)), ] advancedReboot.runRebootTestcase( @@ -96,13 +108,11 @@ def test_warm_reboot_sad(request, get_advanced_reboot, verify_dut_health, def test_warm_reboot_multi_sad(request, get_advanced_reboot, verify_dut_health, - backup_and_restore_config_db, advanceboot_neighbor_restore): + backup_and_restore_config_db, advanceboot_neighbor_restore, + duthost, fanouthosts, nbrhosts): ''' Warm reboot with multi sad path - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. 
- For non lag member cases, this parameter will be skipped - @param request: Spytest commandline argument @param get_advanced_reboot: advanced reboot test fixture ''' @@ -113,17 +123,20 @@ def test_warm_reboot_multi_sad(request, get_advanced_reboot, verify_dut_health, 'dut_bgp_down:3', # Shutdown 3 BGP sessions on DUT brefore rebooting it 'dut_lag_down:2', # Shutdown 2 LAG sessions on DUT brefore rebooting it 'neigh_lag_down:3', # Shutdown 1 LAG session on 3 remote devices (VMs) before reboot DUT - 'dut_lag_member_down:3:1', # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) - # on DUT - 'neigh_lag_member_down:2:1', # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) - 'vlan_port_down:4', + # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) + # on DUT + DutLagMemberDown(duthost, nbrhosts, DatetimeSelector(3), PhyPropsPortSelector(duthost, 1)), + # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) + NeighLagMemberDown(duthost, nbrhosts, fanouthosts, DatetimeSelector(2), PhyPropsPortSelector(duthost, 1)), + DutVlanMemberDown(duthost, PhyPropsPortSelector(duthost, 4)), + NeighVlanMemberDown(duthost, fanouthosts, PhyPropsPortSelector(duthost, 4)), ] + ([ - 'dut_lag_member_down:2:{0}'.format(lagMemberCnt), - # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 remote - # devices (VM) on DUT - 'neigh_lag_member_down:3:{0}'.format(lagMemberCnt), - # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) - # (1 each) + # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 remote + # devices (VM) on DUT + DutLagMemberDown(duthost, nbrhosts, DatetimeSelector(2), PhyPropsPortSelector(duthost, lagMemberCnt)), + # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) + # (1 each) + NeighLagMemberDown(duthost, nbrhosts, fanouthosts, DatetimeSelector(3), PhyPropsPortSelector(duthost, lagMemberCnt)), ] if 
advancedReboot.getTestbedType() in ['t0-64', 't0-116', 't0-64-32'] else []) advancedReboot.runRebootTestcase( @@ -159,9 +172,6 @@ def test_warm_reboot_sad_bgp(request, get_advanced_reboot, verify_dut_health, ''' Warm reboot with sad (bgp) - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. - For non lag member cases, this parameter will be skipped - @param request: Spytest commandline argument @param get_advanced_reboot: advanced reboot test fixture ''' @@ -178,29 +188,29 @@ def test_warm_reboot_sad_bgp(request, get_advanced_reboot, verify_dut_health, def test_warm_reboot_sad_lag_member(request, get_advanced_reboot, verify_dut_health, - backup_and_restore_config_db, advanceboot_neighbor_restore): + backup_and_restore_config_db, advanceboot_neighbor_restore, + duthost, fanouthosts, nbrhosts): ''' Warm reboot with sad path (lag member) - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. - For non lag member cases, this parameter will be skipped - @param request: Spytest commandline argument @param get_advanced_reboot: advanced reboot test fixture ''' advancedReboot = get_advanced_reboot(rebootType='warm-reboot') lagMemberCnt = advancedReboot.getlagMemberCnt() prebootList = [ - 'dut_lag_member_down:3:1', # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) - # on DUT - 'neigh_lag_member_down:2:1', # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) + # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) + # on DUT + DutLagMemberDown(duthost, nbrhosts, DatetimeSelector(3), PhyPropsPortSelector(duthost, 1)), + # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) + NeighLagMemberDown(duthost, nbrhosts, fanouthosts, DatetimeSelector(2), PhyPropsPortSelector(duthost, 1)), ] + ([ - 'dut_lag_member_down:2:{0}'.format(lagMemberCnt), - # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 
remote - # devices (VM) on DUT - 'neigh_lag_member_down:3:{0}'.format(lagMemberCnt), - # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) - # (1 each) + # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 remote + # devices (VM) on DUT + DutLagMemberDown(duthost, nbrhosts, DatetimeSelector(2), PhyPropsPortSelector(duthost, lagMemberCnt)), + # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) + # (1 each) + NeighLagMemberDown(duthost, nbrhosts, fanouthosts, DatetimeSelector(3), PhyPropsPortSelector(duthost, lagMemberCnt)), ] if advancedReboot.getTestbedType() in ['t0-64', 't0-116', 't0-64-32'] else []) advancedReboot.runRebootTestcase( @@ -214,9 +224,6 @@ def test_warm_reboot_sad_lag(request, get_advanced_reboot, verify_dut_health, ''' Warm reboot with sad path (lag) - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. - For non lag member cases, this parameter will be skipped - @param request: Spytest commandline argument @param get_advanced_reboot: advanced reboot test fixture ''' @@ -233,19 +240,17 @@ def test_warm_reboot_sad_lag(request, get_advanced_reboot, verify_dut_health, def test_warm_reboot_sad_vlan_port(request, get_advanced_reboot, verify_dut_health, - backup_and_restore_config_db): + backup_and_restore_config_db, duthost, fanouthosts): ''' Warm reboot with sad path (vlan port) - prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. 
- For non lag member cases, this parameter will be skipped
-
     @param request: Spytest commandline argument
     @param get_advanced_reboot: advanced reboot test fixture
     '''
     advancedReboot = get_advanced_reboot(rebootType='warm-reboot')
     prebootList = [
-        'vlan_port_down:4', # Shutdown 4 vlan ports (interfaces) on DUT
+        DutVlanMemberDown(duthost, PhyPropsPortSelector(duthost, 4)), # Shutdown 4 vlan ports (interfaces) on DUT
+        NeighVlanMemberDown(duthost, fanouthosts, PhyPropsPortSelector(duthost, 4)), # Shutdown 4 vlan ports (interfaces) on fanout
     ]
 
     advancedReboot.runRebootTestcase(

From 134e2b358d9f716eb682020e4ea1299896ed9243 Mon Sep 17 00:00:00 2001
From: "Nana@Nvidia" <78413612+nhe-NV@users.noreply.github.com>
Date: Mon, 16 Aug 2021 23:39:45 +0800
Subject: [PATCH 115/117] Increase the wait time from 120s to 180s for
 test_turn_off_psu_and_check_psu_info (#3981)

Description of PR
Summary:
Increase the wait time from 120s to 180s for test_turn_off_psu_and_check_psu_info
Fixes # (issue)
If the testbed has lots of ports, it will take more than 120s to execute the _check_psu_status_after_power_off

Approach
What is the motivation for this PR?
If the testbed has lots of ports, it will take more than 120s to execute the _check_psu_status_after_power_off, if the timeout value for the wait_until is still 120, then the _check_psu_status_after_power_off will be executed only one time, it will cause the testcase failure, Increase the timeout from 120 to 180 to make sure the _check_psu_status_after_power_off can be executed more than one time.

How did you do it?
Increase the wait time from 120s to 180s for test_turn_off_psu_and_check_psu_info

How did you verify/test it?
Run the testcase and it can pass:
py.test snmp/test_snmp_phy_entity.py --inventory "../ansible/inventory, ../ansible/veos" --host-pattern r-tigris-13 --module-path ../ansible/library/ --testbed r-tigris-13-t1-lag --testbed_file ../ansible/testbed.csv --allow_recover --junit-xml junit_5038854_0.7.1.1.13.1.4.3.7.1.1.xml --assert plain --log-cli-level debug --show-capture=no -ra --showlocals --clean-alluredir --alluredir=/tmp/allure-results --allure_server_addr="10.215.11.120" --allure_server_project_id r-tigris-13-snmp-test-snmp-phy-entity-py -k "test_turn_off_psu_and_check_psu_info"
---
 tests/snmp/test_snmp_phy_entity.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/snmp/test_snmp_phy_entity.py b/tests/snmp/test_snmp_phy_entity.py
index cf3da5797cd..624165341a6 100644
--- a/tests/snmp/test_snmp_phy_entity.py
+++ b/tests/snmp/test_snmp_phy_entity.py
@@ -615,7 +615,7 @@ def test_turn_off_psu_and_check_psu_info(duthosts, enum_rand_one_per_hwsku_hostn
     pdu_controller.turn_off_outlet(first_outlet)
     assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet, False)
     # wait for psud update the database
-    assert wait_until(120, 20, _check_psu_status_after_power_off, duthost, localhost, creds_all_duts)
+    assert wait_until(180, 20, _check_psu_status_after_power_off, duthost, localhost, creds_all_duts)
 
 
 def _check_psu_status_after_power_off(duthost, localhost, creds_all_duts):

From e7e9d5d5c8372645a52e34a44f0286ce99a55551 Mon Sep 17 00:00:00 2001
From: Guohan Lu
Date: Mon, 16 Aug 2021 12:35:12 -0700
Subject: [PATCH 116/117] [veos]: correct veos disk location (#4048)

disk_image_dir and cdrom_image should not contain home_path as the root_path can be an absolute path.

The issue is seen only when root_path is configured as an absolute path.
bug was introduced in #3036 Signed-off-by: Guohan Lu --- ansible/roles/vm_set/tasks/start.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/vm_set/tasks/start.yml b/ansible/roles/vm_set/tasks/start.yml index a03fa3aeef8..644870e55d1 100644 --- a/ansible/roles/vm_set/tasks/start.yml +++ b/ansible/roles/vm_set/tasks/start.yml @@ -94,8 +94,8 @@ mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}" serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}" src_disk_image: "{{ root_path }}/images/{{ src_image_name }}" - disk_image_dir: "{{ home_path }}/{{ root_path }}/disks" - cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}" + disk_image_dir: "{{ root_path }}/disks" + cdrom_image: "{{ root_path }}/images/{{ cd_image_filename }}" mgmt_tap: "{{ vm_name }}-m" backplane_tap: "{{ vm_name }}-back" with_items: "{{ VM_hosts }}" From 741c735b8bf2385e389b252a9c7a0816641cdf90 Mon Sep 17 00:00:00 2001 From: Blueve <672454911@qq.com> Date: Tue, 17 Aug 2021 10:04:37 +0800 Subject: [PATCH 117/117] [console] Fix issue where the reverse ssh failed due to invalid hostkey (#4047) Signed-off-by: Jing Kan jika@microsoft.com --- tests/console/test_console_loopback.py | 2 +- tests/console/test_console_reversessh.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/console/test_console_loopback.py b/tests/console/test_console_loopback.py index 031f37b827e..91abb222af0 100644 --- a/tests/console/test_console_loopback.py +++ b/tests/console/test_console_loopback.py @@ -82,7 +82,7 @@ def test_console_loopback_pingpong(duthost, creds, src_line, dst_line): def create_ssh_client(ip, user, pwd): # Set 'echo=False' is very important since pexpect will echo back all inputs to buffer by default - client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no'.format(user, ip), echo=False) + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'.format(user, ip), 
echo=False) client.expect('[Pp]assword:') client.sendline(pwd) return client diff --git a/tests/console/test_console_reversessh.py b/tests/console/test_console_reversessh.py index a23abd6550a..ac48ec0b537 100644 --- a/tests/console/test_console_reversessh.py +++ b/tests/console/test_console_reversessh.py @@ -60,7 +60,7 @@ def test_console_reversessh_force_interrupt(duthost, creds, target_line): ressh_user = "{}:{}".format(dutuser, target_line) try: - client = pexpect.spawn('ssh {}@{}'.format(ressh_user, dutip)) + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'.format(ressh_user, dutip)) client.expect('[Pp]assword:') client.sendline(dutpass)