From 18f54c91999dda907cde3e37a7cd08f3411fcafd Mon Sep 17 00:00:00 2001
From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com>
Date: Thu, 26 May 2022 15:05:45 +0800
Subject: [PATCH 01/23] [dualtor][minigraph] Add SoC addr minigraph support
(#5614)
Approach
What is the motivation for this PR?
Add minigraph generation support to include the mux cable type and SoC address details.
Signed-off-by: Longxiang Lyu lolv@microsoft.com
How did you do it?
Include mux_cable_facts in dual_tor_facts.
Modify the minigraph templates to include the mux cable type and SoC address details, with the following schema proposal:
```
SmartCable
active-active
192.168.0.3/21
::/0
0.0.0.0/0
::/0
svcstr-7050-acs-1-Servers0-SC
Server
192.168.0.2/21
fc02:1000::2/64
0.0.0.0/0
Servers0
```
How did you verify/test it?
This PR depends on: Azure/sonic-buildimage#10776
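For illustration, the per-port facts the template consumes might look like this (the field names are taken from the template changes below; the index key and concrete values are hypothetical):
```
# dual_tor_facts["mux_cable_facts"], keyed by the stringified port index
# (the template computes it as port_alias.index(cable['dut_intf'])|string).
mux_cable_facts = {
    "0": {
        "cable_type": "active-active",
        "soc_ipv4": "192.168.0.3/21",     # optional; the template falls back to 0.0.0.0/0
        "server_ipv4": "192.168.0.2/21",
        "server_ipv6": "fc02:1000::2/64",
    },
}
```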
---
ansible/library/dual_tor_facts.py | 31 ++++++++++++++++++++++++++++++
ansible/templates/minigraph_png.j2 | 20 ++++++++++++++++++-
2 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/ansible/library/dual_tor_facts.py b/ansible/library/dual_tor_facts.py
index 1629959fdb1..76978a2f90b 100644
--- a/ansible/library/dual_tor_facts.py
+++ b/ansible/library/dual_tor_facts.py
@@ -1,4 +1,26 @@
+import os
+import yaml
+
from collections import defaultdict
+
+try:
+ from ansible.module_utils.dualtor_utils import generate_mux_cable_facts
+except ImportError:
+ # Add parent dir for using outside Ansible
+ import sys
+ sys.path.append('..')
+ from ansible.module_utils.dualtor_utils import generate_mux_cable_facts
+
+
+def load_topo_file(topo_name):
+ """Load topo definition yaml file."""
+ topo_file = "vars/topo_%s.yml" % topo_name
+ if not os.path.exists(topo_file):
+ raise ValueError("Topo file %s not exists" % topo_file)
+ with open(topo_file) as fd:
+ return yaml.safe_load(fd)
+
+
class DualTorParser:
def __init__(self, hostname, testbed_facts, host_vars, vm_config, port_alias, vlan_intfs):
@@ -64,6 +86,14 @@ def generate_cable_names(self):
self.dual_tor_facts['cables'] = cables
+ def generate_mux_cable_facts(self):
+ topo_name = self.testbed_facts["topo"]
+ # use mux_cable_facts only for dualtor mixed topologies
+ if "mixed" in topo_name:
+ topology = load_topo_file(topo_name)["topology"]
+ mux_cable_facts = generate_mux_cable_facts(topology=topology)
+ self.dual_tor_facts["mux_cable_facts"] = mux_cable_facts
+
def get_dual_tor_facts(self):
'''
Gathers facts related to a dual ToR configuration
@@ -73,6 +103,7 @@ def get_dual_tor_facts(self):
self.parse_tor_position()
self.generate_cable_names()
self.parse_loopback_ips()
+ self.generate_mux_cable_facts()
return self.dual_tor_facts
diff --git a/ansible/templates/minigraph_png.j2 b/ansible/templates/minigraph_png.j2
index 51a35cadb19..e93c40c83fa 100644
--- a/ansible/templates/minigraph_png.j2
+++ b/ansible/templates/minigraph_png.j2
@@ -139,12 +139,21 @@
{% set server_base_address_v6 = (vlan_configs.values() | list)[0]['prefix_v6'] %}
{% set server_base_address_v6 = server_base_address_v6 | ipaddr('network') %}
{% set server_base_address_v6 = ':'.join(server_base_address_v6.split(':')[:-1]) + ':{:x}' %}
+{% set mux_cable_facts = dual_tor_facts['mux_cable_facts'] if 'mux_cable_facts' in dual_tor_facts %}
{% for cable in dual_tor_facts['cables'] %}
+{% set intf_index = port_alias.index(cable['dut_intf'])|string %}
SmartCable
+{% if mux_cable_facts is defined %}
+ {{ mux_cable_facts[intf_index]['cable_type'] }}
+
+ {{ mux_cable_facts[intf_index]['soc_ipv4'] if 'soc_ipv4' in mux_cable_facts[intf_index] else '0.0.0.0/0' }}
+
+{% else %}
0.0.0.0/0
+{% endif %}
::/0
@@ -159,12 +168,21 @@
Server
+{% if mux_cable_facts is defined %}
+
+ {{ mux_cable_facts[intf_index]['server_ipv4'] }}
+
+
+ {{ mux_cable_facts[intf_index]['server_ipv6'] }}
+
+{% else %}
{{ server_base_address_v4.format(loop.index + 1) }}/26
-
+
{{ server_base_address_v6.format(loop.index + 1) }}/96
+{% endif %}
0.0.0.0/0
From 94073fa1ca0f7889cbc02ec9a3554cac6e3e3746 Mon Sep 17 00:00:00 2001
From: Zhaohui Sun <94606222+ZhaohuiS@users.noreply.github.com>
Date: Thu, 26 May 2022 16:39:02 +0800
Subject: [PATCH 02/23] raise exception with detailed info for user if virtual
env for python3 doesn't exist (#5703)
What is the motivation for this PR?
The latest docker-ptf image has a virtual environment for Python3, but if a user runs a test case in an old docker-ptf image without the virtual environment, it calls the ptf command, which doesn't exist in the docker-ptf container, and the test case fails.
How did you do it?
Check whether /root/env-python3/bin/ptf exists and throw an exception if it doesn't.
The exception also prints detailed information for the user.
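A minimal sketch of the added logic (the full diff is below); `host.stat` wraps Ansible's stat module and returns a dict with `["stat"]["exists"]`:
```
path_exists = host.stat(path="/root/env-python3/bin/ptf")
if not path_exists["stat"]["exists"]:
    # old docker-ptf image without the Python3 virtual environment
    raise Exception("Virtual environment for Python3 /root/env-python3/bin/ptf doesn't exist. "
                    "Please check and update the docker-ptf image.")
```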
How did you verify/test it?
run dhcp_relay/test_dhcp_relay.py
Signed-off-by: Zhaohui Sun
---
tests/ptf_runner.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py
index 03aca3a25fc..f5589e3c260 100644
--- a/tests/ptf_runner.py
+++ b/tests/ptf_runner.py
@@ -21,7 +21,14 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={},
# ptf will load all scripts under ptftests, it will throw error for py2 scripts.
# So move migrated scripts to seperated py3 folder avoid impacting py2 scripts.
if is_python3:
- cmd = "/root/env-python3/bin/ptf --test-dir {} {}".format(testdir+'/py3', testname)
+ path_exists = host.stat(path="/root/env-python3/bin/ptf")
+ if path_exists["stat"]["exists"]:
+ cmd = "/root/env-python3/bin/ptf --test-dir {} {}".format(testdir+'/py3', testname)
+ else:
+ error_msg = "Virtual environment for Python3 /root/env-python3/bin/ptf doesn't exist.\nPlease check and update docker-ptf image, make sure to use the correct one."
+ logger.error("Exception caught while executing case: {}. Error message: {}"\
+ .format(testname, error_msg))
+ raise Exception(error_msg)
else:
cmd = "ptf --test-dir {} {}".format(testdir, testname)
From f4c5f6cb076a4f591604c19b9c6bab0d8607116d Mon Sep 17 00:00:00 2001
From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com>
Date: Thu, 26 May 2022 17:01:32 -0700
Subject: [PATCH 03/23] Modify minigraph_dpg_asic template to generate unique
port channel (#5705)
What is the motivation for this PR?
The snmp_interfaces test case fails on the supervisor of a packet chassis because the internal PortChannel names generated are not unique for the supervisor. The generated PortChannel names are unique within a namespace but not across namespaces. SNMP expects the PortChannel names to be unique, since the port channel index used in the SNMP output has to be unique.
How did you do it?
Modify the dpg_asic template to generate unique port channel indices for the supervisor node, supporting a maximum of 16 ASICs.
How did you verify/test it?
Tested on a multi-ASIC platform and a linecard to ensure unique port channel indices are generated.
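As a sanity check, the disjointness of the new index blocks can be verified with a few lines of Python (the formula is copied from the template change below; with at most 16 ASICs, each ASIC owns a private block of 16 IDs and PortChannel00 is never produced):
```
def port_channel_id(asic_idx, neigh_asic_idx):
    # mirrors the Jinja2 macro: 1 + 16 * asic_idx + neigh_asic_idx
    return 1 + 16 * asic_idx + neigh_asic_idx

assert port_channel_id(0, 0) == 1     # IDs start at 1, avoiding PortChannel00
assert port_channel_id(0, 15) == 16   # last ID in ASIC 0's block
assert port_channel_id(1, 0) == 17    # ASIC 1's block starts after ASIC 0's
```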
---
ansible/templates/minigraph_dpg_asic.j2 | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/ansible/templates/minigraph_dpg_asic.j2 b/ansible/templates/minigraph_dpg_asic.j2
index b4c38869ec3..86b9b719783 100644
--- a/ansible/templates/minigraph_dpg_asic.j2
+++ b/ansible/templates/minigraph_dpg_asic.j2
@@ -1,6 +1,7 @@
-{# Note max of 10 Backend Portchannel from one asic #}
+{# Note max of 16 Backend Portchannel from one asic #}
{% macro port_channel_id(asic_idx, neigh_asic_idx) -%}
-{{ ((40 + 10 * asic_idx + neigh_asic_idx)|string) }}
+{# Note avoid PortChannel00 #}
+{{ ((1 + 16 * asic_idx + neigh_asic_idx)|string) }}
{%- endmacro -%}
{% if num_asics > 1 %}
{% if (asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined) %}
From 267cef267483a4527efbab2c4dea26e30055523f Mon Sep 17 00:00:00 2001
From: Ramsiddarth <87841726+rraguraj@users.noreply.github.com>
Date: Fri, 27 May 2022 07:56:18 +0530
Subject: [PATCH 04/23] cisco hwsku change (#5718)
Co-authored-by: rraguraj
---
ansible/group_vars/sonic/variables | 4 ++--
ansible/module_utils/port_utils.py | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables
index 6e314a69390..0acbd7b27c8 100644
--- a/ansible/group_vars/sonic/variables
+++ b/ansible/group_vars/sonic/variables
@@ -23,8 +23,8 @@ barefoot_hwskus: [ "montara", "mavericks", "Arista-7170-64C", "newport", "Arista
marvell_hwskus: [ "et6448m" ]
-cisco_hwskus: ["64x100Gb"]
-cisco-8000_gb_hwskus: ["64x100Gb"]
+cisco_hwskus: ["Cisco-8102-C64"]
+cisco-8000_gb_hwskus: ["Cisco-8102-C64"]
## Note:
## Docker volumes should be list instead of dict. However, if we want to keep code DRY, we
diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py
index 296976fceee..cd4270e0368 100644
--- a/ansible/module_utils/port_utils.py
+++ b/ansible/module_utils/port_utils.py
@@ -199,7 +199,7 @@ def get_port_alias_to_name_map(hwsku, asic_name=None):
elif hwsku == "36x100Gb":
for i in range(0, 36):
port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i
- elif hwsku == "64x100Gb":
+ elif hwsku == "Cisco-8102-C64":
for i in range(0, 64):
port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i
elif hwsku in ["8800-LC-48H-O", "88-LC0-36FH-MO"]:
From a36bf5d5c6f1d94000e1eacb2791fc19383f0c5d Mon Sep 17 00:00:00 2001
From: judyjoseph <53951155+judyjoseph@users.noreply.github.com>
Date: Thu, 26 May 2022 19:38:13 -0700
Subject: [PATCH 05/23] Update macsec tests to support t2 topology and
multi-asic (#5530)
* Macsec testcases to support t2_2lc_min topology
---
tests/common/devices/multi_asic.py | 20 ++++++++++++
tests/macsec/__init__.py | 10 ++++++
tests/macsec/macsec_config_helper.py | 42 +++++++++++++++-----------
tests/macsec/macsec_helper.py | 35 +++++++++++++--------
tests/macsec/macsec_platform_helper.py | 15 ++++++---
tests/macsec/test_macsec.py | 11 ++++---
6 files changed, 94 insertions(+), 39 deletions(-)
diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py
index 76751965666..6312c55d735 100644
--- a/tests/common/devices/multi_asic.py
+++ b/tests/common/devices/multi_asic.py
@@ -113,6 +113,26 @@ def _run_on_asics(self, *module_args, **complex_args):
else:
raise ValueError("Argument 'asic_index' must be an int or string 'all'.")
+ def get_dut_iface_mac(self, iface_name):
+ """
+ Gets the MAC address of specified interface.
+
+ Returns:
+ str: The MAC address of the specified interface, or None if it is not found.
+ """
+ try:
+ if self.sonichost.facts['num_asic'] == 1:
+ cmd_prefix = " "
+ else:
+ asic = self.get_port_asic_instance(iface_name)
+ cmd_prefix = "sudo ip netns exec {} ".format(asic.namespace)
+
+ mac = self.command('{} cat /sys/class/net/{}/address'.format(cmd_prefix, iface_name))['stdout']
+ return mac
+ except Exception as e:
+ logger.error('Failed to get MAC address for interface "{}", exception: {}'.format(iface_name, repr(e)))
+ return None
+
def get_frontend_asic_ids(self):
if self.sonichost.facts['num_asic'] == 1:
return [DEFAULT_ASIC_ID]
diff --git a/tests/macsec/__init__.py b/tests/macsec/__init__.py
index 81d49550626..eabdfa246c6 100644
--- a/tests/macsec/__init__.py
+++ b/tests/macsec/__init__.py
@@ -146,6 +146,12 @@ def find_links(self, duthost, tbinfo, filter):
for interface, neighbor in mg_facts["minigraph_neighbors"].items():
filter(interface, neighbor, mg_facts, tbinfo)
+ def is_interface_portchannel_member(self, pc, interface):
+ for pc_name, elements in pc.items():
+ if interface in elements['members']:
+ return True
+ return False
+
def find_links_from_nbr(self, duthost, tbinfo, nbrhosts):
links = collections.defaultdict(dict)
@@ -153,6 +159,10 @@ def filter(interface, neighbor, mg_facts, tbinfo):
if neighbor["name"] not in nbrhosts.keys():
return
port = mg_facts["minigraph_neighbors"][interface]["port"]
+
+ # Currently in t2 topology macsec is validated on regular interfaces. To remove this once it is validated with PC.
+ if tbinfo["topo"]["type"] == "t2" and is_interface_portchannel_member(mg_facts['minigraph_portchannels'], interface):
+ return
links[interface] = {
"name": neighbor["name"],
"host": nbrhosts[neighbor["name"]]["host"],
diff --git a/tests/macsec/macsec_config_helper.py b/tests/macsec/macsec_config_helper.py
index f6c09b0327a..28b8fa2c9bc 100644
--- a/tests/macsec/macsec_config_helper.py
+++ b/tests/macsec/macsec_config_helper.py
@@ -2,7 +2,7 @@
from tests.common.utilities import wait_until
from tests.common.devices.eos import EosHost
from macsec_platform_helper import global_cmd
-from macsec_helper import get_mka_session
+from macsec_helper import get_mka_session, getns_prefix
__all__ = [
@@ -17,7 +17,7 @@
]
-def set_macsec_profile(host, profile_name, priority, cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period = 0):
+def set_macsec_profile(host, port, profile_name, priority, cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period = 0):
if isinstance(host, EosHost):
eos_cipher_suite = {
"GCM-AES-128": "aes128-gcm",
@@ -46,8 +46,8 @@ def set_macsec_profile(host, profile_name, priority, cipher_suite, primary_cak,
"send_sci": send_sci,
"rekey_period": rekey_period,
}
- cmd = "sonic-db-cli CONFIG_DB HMSET 'MACSEC_PROFILE|{}' ".format(
- profile_name)
+ cmd = "sonic-db-cli {} CONFIG_DB HMSET 'MACSEC_PROFILE|{}' ".format(
+ getns_prefix(host, port), profile_name)
for k, v in macsec_profile.items():
cmd += " '{}' '{}' ".format(k, v)
host.command(cmd)
@@ -59,14 +59,14 @@ def set_macsec_profile(host, profile_name, priority, cipher_suite, primary_cak,
host.command("lldpcli configure system bond-slave-src-mac-type real")
-def delete_macsec_profile(host, profile_name):
+def delete_macsec_profile(host, port, profile_name):
if isinstance(host, EosHost):
host.eos_config(
lines=['no profile {}'.format(profile_name)],
parents=['mac security'])
return
- cmd = "sonic-db-cli CONFIG_DB DEL 'MACSEC_PROFILE|{}'".format(profile_name)
+ cmd = "sonic-db-cli {} CONFIG_DB DEL 'MACSEC_PROFILE|{}'".format(getns_prefix(host, port), profile_name)
host.command(cmd)
@@ -77,8 +77,8 @@ def enable_macsec_port(host, port, profile_name):
parents=['interface {}'.format(port)])
return
- cmd = "sonic-db-cli CONFIG_DB HSET 'PORT|{}' 'macsec' '{}'".format(
- port, profile_name)
+ cmd = "sonic-db-cli {} CONFIG_DB HSET 'PORT|{}' 'macsec' '{}'".format(
+ getns_prefix(host, port), port, profile_name)
host.command(cmd)
@@ -89,21 +89,26 @@ def disable_macsec_port(host, port):
parents=['interface {}'.format(port)])
return
- cmd = "sonic-db-cli CONFIG_DB HDEL 'PORT|{}' 'macsec'".format(port)
+ cmd = "sonic-db-cli {} CONFIG_DB HDEL 'PORT|{}' 'macsec'".format(getns_prefix(host, port), port)
host.command(cmd)
def enable_macsec_feature(duthost, macsec_nbrhosts):
nbrhosts = macsec_nbrhosts
+ num_asics = duthost.num_asics()
global_cmd(duthost, nbrhosts, "sudo config feature state macsec enabled")
def check_macsec_enabled():
- for nbr in [n["host"] for n in nbrhosts.values()] + [duthost]:
+ if len(duthost.shell("docker ps | grep macsec | grep -v grep")["stdout_lines"]) < num_asics:
+ return False
+ if len(duthost.shell("ps -ef | grep macsecmgrd | grep -v grep")["stdout_lines"]) < num_asics:
+ return False
+ for nbr in [n["host"] for n in nbrhosts.values()]:
if isinstance(nbr, EosHost):
continue
- if len(nbr.shell("docker ps | grep macsec | grep -v grep")["stdout_lines"]) != 1:
+ if len(nbr.shell("docker ps | grep macsec | grep -v grep")["stdout_lines"]) < 1:
return False
- if len(nbr.shell("ps -ef | grep macsecmgrd | grep -v grep")["stdout_lines"]) != 1:
+ if len(nbr.shell("ps -ef | grep macsecmgrd | grep -v grep")["stdout_lines"]) < 1:
return False
return True
assert wait_until(180, 5, 10, check_macsec_enabled)
@@ -115,13 +120,14 @@ def disable_macsec_feature(duthost, macsec_nbrhosts):
def cleanup_macsec_configuration(duthost, ctrl_links, profile_name):
devices = set()
- devices.add(duthost)
+ if duthost.facts["asic_type"] == "vs":
+ devices.add(duthost)
for dut_port, nbr in ctrl_links.items():
disable_macsec_port(duthost, dut_port)
disable_macsec_port(nbr["host"], nbr["port"])
- delete_macsec_profile(nbr["host"], profile_name)
+ delete_macsec_profile(nbr["host"], nbr["port"], profile_name)
devices.add(nbr["host"])
- delete_macsec_profile(duthost, profile_name)
+ delete_macsec_profile(duthost, dut_port, profile_name)
# Waiting for all mka session were cleared in all devices
for d in devices:
if isinstance(d, EosHost):
@@ -131,16 +137,16 @@ def cleanup_macsec_configuration(duthost, ctrl_links, profile_name):
def setup_macsec_configuration(duthost, ctrl_links, profile_name, default_priority,
cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period):
- set_macsec_profile(duthost, profile_name, default_priority,
- cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period)
i = 0
for dut_port, nbr in ctrl_links.items():
+ set_macsec_profile(duthost, dut_port, profile_name, default_priority,
+ cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period)
enable_macsec_port(duthost, dut_port, profile_name)
if i % 2 == 0:
priority = default_priority - 1
else:
priority = default_priority + 1
- set_macsec_profile(nbr["host"], profile_name, priority,
+ set_macsec_profile(nbr["host"], nbr["port"], profile_name, priority,
cipher_suite, primary_cak, primary_ckn, policy, send_sci, rekey_period)
enable_macsec_port(nbr["host"], nbr["port"], profile_name)
wait_until(20, 3, 0,
diff --git a/tests/macsec/macsec_helper.py b/tests/macsec/macsec_helper.py
index 98a1646d6b7..89effce71b6 100644
--- a/tests/macsec/macsec_helper.py
+++ b/tests/macsec/macsec_helper.py
@@ -51,35 +51,44 @@ def get_sci(macaddress, port_identifer=1, order="network"):
return str(sci)
-QUERY_MACSEC_PORT = "sonic-db-cli APPL_DB HGETALL 'MACSEC_PORT_TABLE:{}'"
+QUERY_MACSEC_PORT = "sonic-db-cli {} APPL_DB HGETALL 'MACSEC_PORT_TABLE:{}'"
-QUERY_MACSEC_INGRESS_SC = "sonic-db-cli APPL_DB HGETALL 'MACSEC_INGRESS_SC_TABLE:{}:{}'"
+QUERY_MACSEC_INGRESS_SC = "sonic-db-cli {} APPL_DB HGETALL 'MACSEC_INGRESS_SC_TABLE:{}:{}'"
-QUERY_MACSEC_EGRESS_SC = "sonic-db-cli APPL_DB HGETALL 'MACSEC_EGRESS_SC_TABLE:{}:{}'"
+QUERY_MACSEC_EGRESS_SC = "sonic-db-cli {} APPL_DB HGETALL 'MACSEC_EGRESS_SC_TABLE:{}:{}'"
-QUERY_MACSEC_INGRESS_SA = "sonic-db-cli APPL_DB HGETALL 'MACSEC_INGRESS_SA_TABLE:{}:{}:{}'"
+QUERY_MACSEC_INGRESS_SA = "sonic-db-cli {} APPL_DB HGETALL 'MACSEC_INGRESS_SA_TABLE:{}:{}:{}'"
-QUERY_MACSEC_EGRESS_SA = "sonic-db-cli APPL_DB HGETALL 'MACSEC_EGRESS_SA_TABLE:{}:{}:{}'"
+QUERY_MACSEC_EGRESS_SA = "sonic-db-cli {} APPL_DB HGETALL 'MACSEC_EGRESS_SA_TABLE:{}:{}:{}'"
+def getns_prefix(host, intf):
+ ns_prefix = " "
+ if host.is_multi_asic:
+ asic = host.get_port_asic_instance(intf)
+ ns = host.get_namespace_from_asic_id(asic.asic_index)
+ ns_prefix = "-n {}".format(ns)
+
+ return ns_prefix
+
def get_appl_db(host, host_port_name, peer, peer_port_name):
port_table = sonic_db_cli(
- host, QUERY_MACSEC_PORT.format(host_port_name))
+ host, QUERY_MACSEC_PORT.format(getns_prefix(host, host_port_name), host_port_name))
host_sci = get_sci(host.get_dut_iface_mac(host_port_name))
peer_sci = get_sci(peer.get_dut_iface_mac(peer_port_name))
egress_sc_table = sonic_db_cli(
- host, QUERY_MACSEC_EGRESS_SC.format(host_port_name, host_sci))
+ host, QUERY_MACSEC_EGRESS_SC.format(getns_prefix(host, host_port_name), host_port_name, host_sci))
ingress_sc_table = sonic_db_cli(
- host, QUERY_MACSEC_INGRESS_SC.format(host_port_name, peer_sci))
+ host, QUERY_MACSEC_INGRESS_SC.format(getns_prefix(host, host_port_name), host_port_name, peer_sci))
egress_sa_table = {}
ingress_sa_table = {}
for an in range(4):
sa_table = sonic_db_cli(host, QUERY_MACSEC_EGRESS_SA.format(
- host_port_name, host_sci, an))
+ getns_prefix(host, host_port_name), host_port_name, host_sci, an))
if sa_table:
egress_sa_table[an] = sa_table
sa_table = sonic_db_cli(host, QUERY_MACSEC_INGRESS_SA.format(
- host_port_name, peer_sci, an))
+ getns_prefix(host, host_port_name), host_port_name, peer_sci, an))
if sa_table:
ingress_sa_table[an] = sa_table
return port_table, egress_sc_table, ingress_sc_table, egress_sa_table, ingress_sa_table
@@ -247,7 +256,7 @@ def create_exp_pkt(pkt, ttl):
def get_macsec_attr(host, port):
eth_src = host.get_dut_iface_mac(port)
- macsec_port = sonic_db_cli(host, QUERY_MACSEC_PORT.format(port))
+ macsec_port = sonic_db_cli(host, QUERY_MACSEC_PORT.format(getns_prefix(host, port), port))
if macsec_port["enable_encrypt"] == "true":
encrypt = 1
else:
@@ -259,10 +268,10 @@ def get_macsec_attr(host, port):
xpn_en = "XPN" in macsec_port["cipher_suite"]
sci = get_sci(eth_src)
macsec_sc = sonic_db_cli(
- host, QUERY_MACSEC_EGRESS_SC.format(port, sci))
+ host, QUERY_MACSEC_EGRESS_SC.format(getns_prefix(host, port), port, sci))
an = int(macsec_sc["encoding_an"])
macsec_sa = sonic_db_cli(
- host, QUERY_MACSEC_EGRESS_SA.format(port, sci, an))
+ host, QUERY_MACSEC_EGRESS_SA.format(getns_prefix(host, port), port, sci, an))
sak = binascii.unhexlify(macsec_sa["sak"])
sci = int(get_sci(eth_src, order="host"), 16)
if xpn_en:
diff --git a/tests/macsec/macsec_platform_helper.py b/tests/macsec/macsec_platform_helper.py
index 48015199c33..dd7e2246a88 100644
--- a/tests/macsec/macsec_platform_helper.py
+++ b/tests/macsec/macsec_platform_helper.py
@@ -34,8 +34,13 @@ def sonic_db_cli(host, cmd):
return ast.literal_eval(host.shell(cmd)["stdout_lines"][0])
-def get_all_ifnames(host):
- cmd = "ls /sys/class/net/"
+def get_all_ifnames(host, asic):
+ cmd_prefix = " "
+ if host.is_multi_asic:
+ ns = host.get_namespace_from_asic_id(asic.asic_index)
+ cmd_prefix = "sudo ip netns exec {} ".format(ns)
+
+ cmd = "{} ls /sys/class/net/".format(cmd_prefix)
output = host.command(cmd)["stdout_lines"]
ports = {
"Ethernet": [],
@@ -54,7 +59,8 @@ def get_all_ifnames(host):
def get_eth_ifname(host, port_name):
if u"x86_64-kvm_x86_64" in get_platform(host):
logging.info("Get the eth ifname on the virtual SONiC switch")
- ports = get_all_ifnames(host)
+ asic = host.get_port_asic_instance(intf)
+ ports = get_all_ifnames(host, asic)
assert port_name in ports["Ethernet"]
return ports["eth"][ports["Ethernet"].index(port_name)]
# Same as port_name
@@ -66,7 +72,8 @@ def get_macsec_ifname(host, port_name):
logging.info(
"Can only get the macsec ifname on the virtual SONiC switch")
return None
- ports = get_all_ifnames(host)
+ asic = host.get_port_asic_instance(intf)
+ ports = get_all_ifnames(host, asic)
assert port_name in ports["Ethernet"]
eth_port = ports["eth"][ports["Ethernet"].index(port_name)]
macsec_infname = "macsec_"+eth_port
diff --git a/tests/macsec/test_macsec.py b/tests/macsec/test_macsec.py
index 504fe7fdf4b..925202941f7 100644
--- a/tests/macsec/test_macsec.py
+++ b/tests/macsec/test_macsec.py
@@ -15,7 +15,7 @@
pytestmark = [
pytest.mark.macsec_required,
- pytest.mark.topology("t0"),
+ pytest.mark.topology("t0", "t2"),
]
@@ -257,12 +257,12 @@ def test_mismatch_macsec_configuration(self, duthost, unctrl_links,
disable_macsec_port(duthost, port_name)
disable_macsec_port(nbr["host"], nbr["port"])
- delete_macsec_profile(nbr["host"], profile_name)
+ delete_macsec_profile(nbr["host"], nbr["port"], profile_name)
# Set a wrong cak to the profile
primary_cak = "0" * len(primary_cak)
enable_macsec_port(duthost, port_name, profile_name)
- set_macsec_profile(nbr["host"], profile_name, default_priority,
+ set_macsec_profile(nbr["host"], nbr["port"], profile_name, default_priority,
cipher_suite, primary_cak, primary_ckn, policy, send_sci)
enable_macsec_port(nbr["host"], nbr["port"], profile_name)
@@ -277,7 +277,7 @@ def check_mka_establishment():
# Teardown
disable_macsec_port(duthost, port_name)
disable_macsec_port(nbr["host"], nbr["port"])
- delete_macsec_profile(nbr["host"], profile_name)
+ delete_macsec_profile(nbr["host"], nbr["port"], profile_name)
class TestInteropProtocol():
@@ -381,6 +381,9 @@ def test_snmp(self, duthost, ctrl_links, upstream_links, creds):
'''
Verify SNMP request/response works across interface with macsec configuration
'''
+ if duthost.is_multi_asic:
+ pytest.skip("The test is for Single ASIC devices")
+
for ctrl_port, nbr in ctrl_links.items():
if isinstance(nbr["host"], EosHost):
result = nbr["host"].eos_command(
From df76bdf457f758f0190373b7ab649d69aecce1fd Mon Sep 17 00:00:00 2001
From: Zhaohui Sun <94606222+ZhaohuiS@users.noreply.github.com>
Date: Fri, 27 May 2022 15:44:40 +0800
Subject: [PATCH 06/23] Add the introduction of docker-ptf migration (#5711)
What is the motivation for this PR?
Add the introduction of docker-ptf python3 migration.
Signed-off-by: Zhaohui Sun
---
.../roles/test/files/ptftests/py3/README.md | 174 ++++++++++++++++++
1 file changed, 174 insertions(+)
create mode 100644 ansible/roles/test/files/ptftests/py3/README.md
diff --git a/ansible/roles/test/files/ptftests/py3/README.md b/ansible/roles/test/files/ptftests/py3/README.md
new file mode 100644
index 00000000000..dfb5ec096cb
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/py3/README.md
@@ -0,0 +1,174 @@
+# Introduction of SONiC docker-ptf migration
+
+# Why migrate docker-ptf
+
+
+As of January 1, 2020, Python 2 is no longer supported by the Python core team, which is why we have to migrate our code from Python2 to Python3.
+
+`docker-ptf` is our first goal because it's a separate docker container, and only the scripts under `ansible/role/test/files/ptftests` in the `sonic-mgmt` repo run in `docker-ptf`.
+
+Its dependencies are not very complicated, so it's much easier to start with.
+
+# How to migrate Python3 for PTF
+
+In order to migrate docker-ptf smoothly and incrementally, we plan to do it step by step rather than migrating all related scripts to Python3 immediately, which would cause large-scale failures and impact the nightly test for the master image.
+
+Migration includes 4 stages:
+
+## Stage 1: Prepare Python3 virtual environment in docker-ptf
+Add a Python3 virtual environment to docker-ptf. Python2 is kept in this stage for incremental migration.
+
+[PR](https://github.com/Azure/sonic-buildimage/pull/10599) to address this.
+
+`/root/env-python3/bin/ptf` is installed and will be used for `ptftests` Python3 scripts.
+
+The `ptf` version is `0.9.3`; `scapy 2.4.5` is also installed into the virtual environment.
+
+This stage is complete.
+
+
+## Stage 2: Migrate `ptftests` scripts one by one, feature by feature
+
+There are 36 scripts under `ansible/role/test/files/ptftests` in the `sonic-mgmt` repo, covering nearly 20 features.
+There are 11 scripts under `ansible/role/test/files/acstests` as well.
+
+
+**1. Use `2to3` to convert a `ptftests` script automatically**
+
+2to3 is a tool that can convert a script from Python2 to Python3 automatically.
+
+The command looks like this:
+`2to3 --write --nobackups ansible/role/test/files/ptftests/your_script`
+
+Here is the [doc](https://docs.python.org/3/library/2to3.html) for 2to3.
+
+
+If it is not available on your host, you need to first install the following packages:
+
+
+```
+apt install 2to3
+apt install python3-lib2to3
+apt install python3-toolz
+```
+
+For Windows, just install the 2to3 package:
+
+`pip install 2to3`
+
+Then you can check the changes with `git diff`.
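+
+As a hypothetical example of the kind of rewrite `2to3` performs (not taken from any specific script here):
+
+```
+# Before (Python2)
+print "ip=%s" % ip
+if isinstance(ip, unicode):
+    keys = table.keys()
+
+# After (Python3, as rewritten by 2to3)
+print("ip=%s" % ip)
+if isinstance(ip, str):
+    keys = list(table.keys())
+```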
+
+**2. Move your modified `ptftest` script to `ansible/role/test/files/ptftests/py3`**
+
+`ansible/role/test/files/ptftests/py3` is a newly added subfolder for Python3 scripts.
+
+The `ptf` command loads all scripts under `--test-dir` before running a test; it fails if any module can't be imported.
+
+Python3 scripts are run with the `/root/env-python3/bin/ptf` command and `--test-dir` set to `ptftests/py3`, so only the scripts under `ptftests/py3` are loaded, and the scripts under `ptftests` are not checked.
+
+But the remaining Python2 scripts still run with the `ptf` command and `--test-dir` set to `ptftests`, which loads all scripts under `ptftests`, even those under the `py3` subfolder.
+So make sure there are no incompatibility issues when running the `ptf` command.
+
+We suggest running some of the old Python2 scripts that call the `ptf` command after you finish migrating your scripts. This checks for incompatible-module issues and avoids failures in the Python2 scripts.
+
+For example, there is no `scapy.contrib.macsec` module for Python2, so it's safe to add a conditional import like this to avoid failures:
+
+```
+MACSEC_SUPPORTED = False
+if hasattr(scapy, "VERSION") and tuple(map(int, scapy.VERSION.split('.'))) >= (2, 4, 5):
+ MACSEC_SUPPORTED = True
+if MACSEC_SUPPORTED:
+ import scapy.contrib.macsec as scapy_macsec
+```
+
+If your script involves the following library scripts, please create a **soft link** under `py3` for them after modification. The remaining Python2 scripts will still use them, so they are shared by both sides during the migration period:
+ - `lmp.py`
+ - `fib.py`
+ - `fib_test.py`
+ - `device_connection.py`
+
+**Important: These library scripts should be both Python2 and Python3 compatible.**
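+
+A common way to keep these shared scripts compatible with both interpreters is the `six` helper, e.g. (a hypothetical snippet mirroring what the migrated `fib.py` and `lpm.py` do):
+
+```
+import six
+from ipaddress import ip_network
+
+# six.text_type is unicode on Python2 and str on Python3, so
+# ip_network() accepts the prefix under both interpreters.
+prefix = ip_network(six.text_type("192.168.0.0/21"))
+```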
+
+
+Please check [this PR](https://github.com/Azure/sonic-mgmt/pull/5490) for reference.
+
+**3. Update the `tests` script to call the virtual-env ptf**
+
+Add the `is_python3=True` parameter to `ptf_runner` in your test script, for example:
+
+```
+ ptf_runner(ptfhost,
+ "ptftests",
+ "dhcpv6_relay_test.DHCPTest",
+ platform_dir="ptftests",
+ params={"hostname": duthost.hostname,
+ "client_port_index": dhcp_relay['client_iface']['port_idx'],
+ "leaf_port_indices": repr(dhcp_relay['uplink_port_indices']),
+ "num_dhcp_servers": len(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs']),
+ "server_ip": str(dhcp_relay['downlink_vlan_iface']['dhcpv6_server_addrs'][0]),
+ "relay_iface_ip": str(dhcp_relay['downlink_vlan_iface']['addr']),
+ "relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
+ "relay_link_local": str(dhcp_relay['uplink_interface_link_local']),
+ "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr'])},
+ log_file="/tmp/dhcpv6_relay_test.DHCPTest.log", is_python3=True)
+```
+
+
+
+It will run `/root/env-python3/bin/ptf` instead of the `ptf` command that is used for Python2.
+It will also call your modified ptftests scripts under the `py3` subfolder.
+
+That's the difference in usage between Python2 and Python3 scripts.
+
+Please take the [DHCP Relay PR](https://github.com/Azure/sonic-mgmt/pull/5534) and the [dir bcast PR](https://github.com/Azure/sonic-mgmt/pull/5540) for reference.
+
+
+
+**4. Run test cases with the correct docker-ptf image for verification**
+
+`2to3` only makes common format and syntax changes; it's not enough by itself.
+
+For Python3, scripts use `ptf 0.9.3` and `scapy 2.4.5`, so some packet formats could be different; we have to retest the scripts manually before submitting a PR.
+
+- Check the `docker-ptf` image
+Log in to the `docker-ptf` container to check whether it's the correct image. If `env-python3` exists under `/root`, you are using the correct image.
+
+```
+azure@STR-ACS-SERV-07:~$ docker exec -it ptf_vms7-11 bash
+root@72cf0e0442c3:~# cd /root
+root@72cf0e0442c3:~# ls
+debs env-python3 gnxi python-saithrift_0.9.4_amd64.deb
+root@72cf0e0442c3:~#
+```
+Otherwise, an exception will be thrown asking you to update the `docker-ptf` image.
+
+
+- Submit a PR
+Please add **[python3]** to your PR title.
+
+## Stage 3: Migrate other functional scripts that run in docker-ptf
+Some functional scripts are also copied to and run in docker-ptf, such as:
+- `scripts/arp_responder.py`
+- `scripts/garp_service.py`
+- `scripts/icmp_responder.py`
+- `scripts/dual_tor_sniffer.py`
+- `scripts/nat_ptf_echo.py`
+- `bgp/bgp_monitor_dump.py`
+- `http/start_http_server.py`
+- `http/stop_http_server.py`
+- `ansible/library/exabgp.py`
+- `arp/files/ferret.py`
+
+During migration of these scripts, make sure to call `/root/env-python3/bin/python3` to run them.
+
+## Stage 4: Migrate docker-ptf to pure Python3 environment
+When stage 3 is done, this could be the final stage:
+- Update docker-ptf's Dockerfile: use bullseye, no longer install Python2, and keep only the Python3 environment.
+- Move all ptftests scripts from the `py3` subfolder to the `ptftests` folder.
+- Remove the `is_python3` parameter in ptf_runner, remove the `/root/env-python3/bin/ptf` part, and just call the `ptf` command.
+- Remove `is_python3=True` from all test scripts.
+- Remove the Python-version checkers in the `ptftests` scripts.
+
+
+
+
From 901ca592b7ba0f464c2cf7ce98130f94451fedfd Mon Sep 17 00:00:00 2001
From: Zhijian Li
Date: Fri, 27 May 2022 21:29:33 +0800
Subject: [PATCH 07/23] [platform] spend more time waiting for SSH shutdown and
startup after OOM reboot (#5724)
What is the motivation for this PR?
Spend more time waiting for DUT reboot after OOM.
Test cases in test_reboot.py wait 3 to 5 minutes for both SSH shutdown and startup. For an OOM reboot, we should be more tolerant, so I decided to wait at most 6 minutes in test_memory_exhaustion.
How did you do it?
For OOM reboot, wait at most 6 minutes for both SSH shutdown and startup.
How did you verify/test it?
Verified on physical testbeds.
Signed-off-by: Zhijian Li
---
tests/platform_tests/test_memory_exhaustion.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tests/platform_tests/test_memory_exhaustion.py b/tests/platform_tests/test_memory_exhaustion.py
index ea2224eaeb5..52c57cfc0ec 100644
--- a/tests/platform_tests/test_memory_exhaustion.py
+++ b/tests/platform_tests/test_memory_exhaustion.py
@@ -10,6 +10,9 @@
pytest.mark.topology('any')
]
+SSH_SHUTDOWN_TIMEOUT = 360
+SSH_STARTUP_TIMEOUT = 360
+
def test_memory_exhaustion(duthosts, enum_frontend_dut_hostname, localhost):
"""validate kernel will panic and reboot the DUT when runs out of memory and hits oom event"""
@@ -33,7 +36,7 @@ def test_memory_exhaustion(duthosts, enum_frontend_dut_hostname, localhost):
state='absent',
search_regex=SONIC_SSH_REGEX,
delay=10,
- timeout=120,
+ timeout=SSH_SHUTDOWN_TIMEOUT,
module_ignore_errors=True)
pytest_assert(not res.is_failed and 'Timeout' not in res.get('msg', ''),
'DUT {} did not shutdown'.format(hostname))
@@ -44,7 +47,7 @@ def test_memory_exhaustion(duthosts, enum_frontend_dut_hostname, localhost):
state='started',
search_regex=SONIC_SSH_REGEX,
delay=10,
- timeout=120,
+ timeout=SSH_STARTUP_TIMEOUT,
module_ignore_errors=True)
pytest_assert(not res.is_failed and 'Timeout' not in res.get('msg', ''),
'DUT {} did not startup'.format(hostname))
From 56a6217070bbd8c779ba98cd57d509ebebca1de1 Mon Sep 17 00:00:00 2001
From: Vaibhav Hemant Dixit
Date: Fri, 27 May 2022 15:10:06 -0700
Subject: [PATCH 08/23] [advanced-reboot] Fix bgpd.log handling, lacp_session
check and log collection (#5415)
Test fixes for test_advanced_reboot: fetch bgpd.log for large-disk upgrades, and handle the None case for lacp_session.
When upgrading from 201811 to 202012 on large-disk devices, bgpd.log still sits inside the quagga directory. To make the ansible fetch work from a single location, the log is copied from the quagga or frr directory to a common file; this way it works as a generic solution for all upgrade-path scenarios.
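A sketch of the core idea (the actual change lives in bgpd_log_handler in tests/platform_tests/conftest.py below):
```
# Combine whichever of the two logs exists into one well-known file so the
# ansible fetch only ever needs /var/log/bgpd.log, regardless of upgrade path.
duthost.shell(
    "cat /var/log/quagga/bgpd.log /var/log/frr/bgpd.log | sort -n > /var/log/bgpd.log",
    module_ignore_errors=True)  # tolerate whichever path does not exist
```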
---
tests/common/fixtures/advanced_reboot.py | 19 +++---
tests/platform_tests/conftest.py | 80 ++++++++++++------------
2 files changed, 52 insertions(+), 47 deletions(-)
diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py
index 351904c8b8a..c87b8070f6f 100644
--- a/tests/common/fixtures/advanced_reboot.py
+++ b/tests/common/fixtures/advanced_reboot.py
@@ -41,7 +41,7 @@ def __init__(self, request, duthost, ptfhost, localhost, tbinfo, creds, **kwargs
@param tbinfo: fixture provides information about testbed
@param kwargs: extra parameters including reboot type
'''
- assert 'rebootType' in kwargs and kwargs['rebootType'] in ['fast-reboot', 'warm-reboot', 'warm-reboot -f'], (
+ assert 'rebootType' in kwargs and ('warm-reboot' in kwargs['rebootType'] or 'fast-reboot' in kwargs['rebootType']) , (
"Please set rebootType var."
)
@@ -366,17 +366,22 @@ def __fetchTestLogs(self, rebootOper=None):
os.makedirs(log_dir)
log_dir = log_dir + "/"
+ if "warm" in self.rebootType:
+ # normalize "warm-reboot -f", "warm-reboot -c" to "warm-reboot" for report collection
+ reboot_file_prefix = "warm-reboot"
+ else:
+ reboot_file_prefix = self.rebootType
if rebootOper is None:
- rebootLog = '/tmp/{0}.log'.format(self.rebootType)
- rebootReport = '/tmp/{0}-report.json'.format(self.rebootType)
+ rebootLog = '/tmp/{0}.log'.format(reboot_file_prefix)
+ rebootReport = '/tmp/{0}-report.json'.format(reboot_file_prefix)
capturePcap = '/tmp/capture.pcap'
filterPcap = '/tmp/capture_filtered.pcap'
syslogFile = '/tmp/syslog'
sairedisRec = '/tmp/sairedis.rec'
swssRec = '/tmp/swss.rec'
else:
- rebootLog = '/tmp/{0}-{1}.log'.format(self.rebootType, rebootOper)
- rebootReport = '/tmp/{0}-{1}-report.json'.format(self.rebootType, rebootOper)
+ rebootLog = '/tmp/{0}-{1}.log'.format(reboot_file_prefix, rebootOper)
+ rebootReport = '/tmp/{0}-{1}-report.json'.format(reboot_file_prefix, rebootOper)
capturePcap = '/tmp/capture_{0}.pcap'.format(rebootOper)
filterPcap = '/tmp/capture_filtered_{0}.pcap'.format(rebootOper)
syslogFile = '/tmp/syslog_{0}'.format(rebootOper)
@@ -628,7 +633,7 @@ def tearDown(self):
self.__restorePrevImage()
@pytest.fixture
-def get_advanced_reboot(request, duthosts, rand_one_dut_hostname, ptfhost, localhost, tbinfo, creds):
+def get_advanced_reboot(request, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, localhost, tbinfo, creds):
'''
Pytest test fixture that provides access to AdvancedReboot test fixture
@param request: pytest request object
@@ -637,7 +642,7 @@ def get_advanced_reboot(request, duthosts, rand_one_dut_hostname, ptfhost, local
@param localhost: Localhost for interacting with localhost through ansible
@param tbinfo: fixture provides information about testbed
'''
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
instances = []
def get_advanced_reboot(**kwargs):
diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py
index 033d7848b1e..503cdb41434 100644
--- a/tests/platform_tests/conftest.py
+++ b/tests/platform_tests/conftest.py
@@ -138,10 +138,11 @@ def get_report_summary(analyze_result, reboot_type):
_parse_timestamp(marker_first_time)).total_seconds()
time_spans_summary.update({entity.lower(): str(time_taken)})
- lacp_sessions_waittime = analyze_result.get(\
- "controlplane", {"lacp_sessions": []}).pop("lacp_sessions")
+ lacp_sessions_dict = analyze_result.get("controlplane")
+ lacp_sessions_waittime = lacp_sessions_dict.pop("lacp_sessions")\
+ if lacp_sessions_dict and "lacp_sessions" in lacp_sessions_dict else None
controlplane_summary = {"downtime": "", "arp_ping": "", "lacp_session_max_wait": ""}
- if len(lacp_sessions_waittime) > 0:
+ if lacp_sessions_waittime and len(lacp_sessions_waittime) > 0:
max_lacp_session_wait = max(list(lacp_sessions_waittime.values()))
analyze_result.get(\
"controlplane", controlplane_summary).update(
@@ -362,6 +363,10 @@ def overwrite_script_to_backup_logs(duthost, reboot_type, bgpd_log):
duthost.shell(insert_backup_command)
+def get_current_sonic_version(duthost):
+ return duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
+
+
@pytest.fixture()
def advanceboot_loganalyzer(duthosts, rand_one_dut_hostname, request):
"""
@@ -387,27 +392,38 @@ def advanceboot_loganalyzer(duthosts, rand_one_dut_hostname, request):
device_marks = [arg for mark in request.node.iter_markers(name='device_type') for arg in mark.args]
if 'vs' not in device_marks:
pytest.skip('Testcase not supported for kvm')
-
- base_os_version = duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
- if 'SONiC-OS-201811' in base_os_version:
- bgpd_log = "/var/log/quagga/bgpd.log"
- else:
- bgpd_log = "/var/log/frr/bgpd.log"
-
hwsku = duthost.facts["hwsku"]
log_filesystem = duthost.shell("df -h | grep '/var/log'")['stdout']
logs_in_tmpfs = True if log_filesystem and "tmpfs" in log_filesystem else False
- if hwsku in SMALL_DISK_SKUS or logs_in_tmpfs:
- # For small disk devices, /var/log in mounted in tmpfs.
- # Hence, after reboot the preboot logs are lost.
- # For log_analyzer to work, it needs logs from the shutdown path
- # Below method inserts a step in reboot script to back up logs to /host/
- overwrite_script_to_backup_logs(duthost, reboot_type, bgpd_log)
+ loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="test_advanced_reboot_{}".format(test_name))
- loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="test_advanced_reboot_{}".format(test_name),
- additional_files={'/var/log/swss/sairedis.rec': 'recording on: /var/log/swss/sairedis.rec', bgpd_log: ''})
+ def bgpd_log_handler(preboot=False):
+ # check current OS version post-reboot. This can be different than preboot OS version in case of upgrade
+ current_os_version = get_current_sonic_version(duthost)
+ if preboot:
+ if 'SONiC-OS-201811' in current_os_version:
+ bgpd_log = "/var/log/quagga/bgpd.log"
+ else:
+ bgpd_log = "/var/log/frr/bgpd.log"
+ additional_files={'/var/log/swss/sairedis.rec': '', bgpd_log: ''}
+ loganalyzer.additional_files = list(additional_files.keys())
+ loganalyzer.additional_start_str = list(additional_files.values())
+ return bgpd_log
+ else:
+ # log_analyzer may start with quagga and end with frr, and frr.log might still have old logs.
+ # To avoid missing preboot log, or analyzing old logs, combine quagga and frr log into new file
+ duthost.shell("cat {} {} | sort -n > {}".format(
+ "/var/log/quagga/bgpd.log", "/var/log/frr/bgpd.log", "/var/log/bgpd.log"), module_ignore_errors=True)
+ loganalyzer.additional_files = ['/var/log/swss/sairedis.rec', '/var/log/bgpd.log']
def pre_reboot_analysis():
+ bgpd_log = bgpd_log_handler(preboot=True)
+ if hwsku in SMALL_DISK_SKUS or logs_in_tmpfs:
+ # For small disk devices, /var/log in mounted in tmpfs.
+ # Hence, after reboot the preboot logs are lost.
+ # For log_analyzer to work, it needs logs from the shutdown path
+ # Below method inserts a step in reboot script to back up logs to /host/
+ overwrite_script_to_backup_logs(duthost, reboot_type, bgpd_log)
marker = loganalyzer.init()
loganalyzer.load_common_config()
@@ -422,35 +438,18 @@ def pre_reboot_analysis():
loganalyzer.match_regex = []
return marker
- def post_reboot_analysis(marker, reboot_oper=None, log_dir=None):
+ def post_reboot_analysis(marker, event_counters=None, reboot_oper=None, log_dir=None):
+ bgpd_log_handler()
if hwsku in SMALL_DISK_SKUS or logs_in_tmpfs:
restore_backup = "mv /host/syslog.99 /var/log/; " +\
"mv /host/sairedis.rec.99 /var/log/swss/; " +\
"mv /host/swss.rec.99 /var/log/swss/; " +\
- "mv /host/bgpd.log.99 /var/log/frr/"
+ "mv /host/bgpd.log.99 /var/log/"
duthost.shell(restore_backup, module_ignore_errors=True)
# find the fast/warm-reboot script path
reboot_script_path = duthost.shell('which {}'.format("{}-reboot".format(reboot_type)))['stdout']
# restore original script. If the ".orig" file does not exist (upgrade path case), ignore the error.
duthost.shell("mv {} {}".format(reboot_script_path + ".orig", reboot_script_path), module_ignore_errors=True)
-
- # check current OS version post-reboot. This can be different than preboot OS version in case of upgrade
- target_os_version = duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
- upgrade_out_201811 = "SONiC-OS-201811" in base_os_version and "SONiC-OS-201811" not in target_os_version
- if 'SONiC-OS-201811' in target_os_version:
- bgpd_log = "/var/log/quagga/bgpd.log"
- else:
- bgpd_log = "/var/log/frr/bgpd.log"
- if upgrade_out_201811 and not logs_in_tmpfs:
- # if upgrade from 201811 to future branch is done there are two cases:
- # 1. Small disk devices: previous quagga logs don't exist anymore, handled in restore_backup.
- # 2. Other devices: prev quagga log to be copied to a common place, for ansible extract to work:
- duthost.shell("cp {} {}".format(
- "/var/log/quagga/bgpd.log", "/var/log/frr/bgpd.log.99"), module_ignore_errors=True)
- additional_files={'/var/log/swss/sairedis.rec': 'recording on: /var/log/swss/sairedis.rec', bgpd_log: ''}
- loganalyzer.additional_files = list(additional_files.keys())
- loganalyzer.additional_start_str = list(additional_files.values())
-
result = loganalyzer.analyze(marker, fail=False)
analyze_result = {"time_span": dict(), "offset_from_kexec": dict()}
offset_from_kexec = dict()
@@ -557,7 +556,7 @@ def capture_interface_counters(duthosts, rand_one_dut_hostname):
res.pop('stdout')
res.pop('stderr')
outputs.append(res)
- logging.info("Counters before reboot test: dut={}, cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4)))
+ logging.debug("Counters before reboot test: dut={}, cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4)))
yield
@@ -567,7 +566,8 @@ def capture_interface_counters(duthosts, rand_one_dut_hostname):
res.pop('stdout')
res.pop('stderr')
outputs.append(res)
- logging.info("Counters after reboot test: dut={}, cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4)))
+ logging.debug("Counters after reboot test: dut={}, cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4)))
+
@pytest.fixture()
def thermal_manager_enabled(duthosts, enum_rand_one_per_hwsku_hostname):
From 6577b61af4160cb4f4ca7462ad0d79989f5cf1ef Mon Sep 17 00:00:00 2001
From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com>
Date: Sat, 28 May 2022 12:31:40 +0800
Subject: [PATCH 09/23] [nic_simulator] Prioritize flows in the OVS bridge
(#5720)
Approach
What is the motivation for this PR?
The flows previously defined in the OVS bridge had no priority:
```
[1]: cookie=0x0, duration=4.564s, table=0, n_packets=41, n_bytes=2829, in_port="vlab-11-2" actions=output:"iaa-vms6-8-1",output:"nic-vms6-8-1"
[2]: cookie=0x0, duration=4.552s, table=0, n_packets=43, n_bytes=2967, in_port="vlab-12-2" actions=output:"iaa-vms6-8-1",output:"nic-vms6-8-1"
[3]: cookie=0x0, duration=4.533s, table=0, n_packets=0, n_bytes=0, in_port="nic-vms6-8-1" actions=output:"vlab-12-2",output:"vlab-11-2"
[4]: cookie=0x0, duration=4.497s, table=0, n_packets=0, n_bytes=0, in_port="iaa-vms6-8-1" actions=group:1
[5]: cookie=0x0, duration=4.522s, table=0, n_packets=84, n_bytes=5796, icmp,in_port="iaa-vms6-8-1" actions=output:"vlab-12-2",output:"vlab-11-2"
```
But the flow that replicates the ICMP traffic to both ToRs [5] should be prioritized over the ECMP one [4], so that upstream ICMP traffic is always duplicated to both ToRs instead of being forwarded with the ECMP selection.
Signed-off-by: Longxiang Lyu lolv@microsoft.com
How did you do it?
Add priorities to the flows so that the ICMP replication flow takes precedence over the ECMP selection flow (flows with higher priority are matched first).
After:
```
cookie=0x0, duration=4.564s, table=0, n_packets=41, n_bytes=2829, priority=10,in_port="vlab-11-2" actions=output:"iaa-vms6-8-1",output:"nic-vms6-8-1"
cookie=0x0, duration=4.552s, table=0, n_packets=43, n_bytes=2967, priority=10,in_port="vlab-12-2" actions=output:"iaa-vms6-8-1",output:"nic-vms6-8-1"
cookie=0x0, duration=4.533s, table=0, n_packets=0, n_bytes=0, priority=9,in_port="nic-vms6-8-1" actions=output:"vlab-12-2",output:"vlab-11-2"
cookie=0x0, duration=4.497s, table=0, n_packets=0, n_bytes=0, priority=7,in_port="iaa-vms6-8-1" actions=group:1
cookie=0x0, duration=4.522s, table=0, n_packets=84, n_bytes=5796, priority=8,icmp,in_port="iaa-vms6-8-1" actions=output:"vlab-12-2",output:"vlab-11-2"
```
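The priority is simply prepended to the flow's match string; a minimal sketch of the prefix construction, mirroring the `OVSFlow` change below (OVS matches higher-priority flows first):
```
def flow_prefix(in_port, packet_filter=None, priority=None):
    parts = []
    if priority:
        parts.append("priority=%s" % priority)
    if packet_filter:
        parts.append(str(packet_filter))
    parts.append("in_port=%s" % in_port)
    return ",".join(parts)

# the ICMP replication flow (priority 8) wins over the ECMP flow (priority 7)
assert flow_prefix("iaa-vms6-8-1", "icmp", 8) == 'priority=8,icmp,in_port=iaa-vms6-8-1'
```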
How did you verify/test it?
If both ToRs are active, both can receive the heartbeat replies from each side.
---
.../dualtor/nic_simulator/nic_simulator.py | 36 ++++++++++---------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/ansible/dualtor/nic_simulator/nic_simulator.py b/ansible/dualtor/nic_simulator/nic_simulator.py
index d1134a14150..9edea44f96b 100644
--- a/ansible/dualtor/nic_simulator/nic_simulator.py
+++ b/ansible/dualtor/nic_simulator/nic_simulator.py
@@ -164,17 +164,21 @@ def to_string(self):
class OVSFlow(StrObj):
"""Object to represent an OVS flow."""
- __slots__ = ("in_port", "output_ports", "group", "_str_prefix")
+ __slots__ = ("in_port", "output_ports", "group", "priority", "_str_prefix")
- def __init__(self, in_port, packet_filter=None, output_ports=[], group=None):
+ def __init__(self, in_port, packet_filter=None, output_ports=[], group=None, priority=None):
self.in_port = in_port
self.packet_filter = packet_filter
self.output_ports = set(output_ports)
self.group = group
+ self.priority = priority
+ self._str_prefix = []
+ if self.priority:
+ self._str_prefix.append("priority=%s" % self.priority)
if self.packet_filter:
- self._str_prefix = "%s,in_port=%s" % (self.packet_filter, self.in_port)
- else:
- self._str_prefix = "in_port=%s" % (self.in_port)
+ self._str_prefix.append(str(self.packet_filter))
+ self._str_prefix.append("in_port=%s" % self.in_port)
+ self._str_prefix = ",".join(self._str_prefix)
def to_string(self):
flow_parts = [self._str_prefix]
@@ -262,8 +266,8 @@ class UpstreamECMPFlow(OVSFlow):
__slots__ = ()
- def __init__(self, in_port, group):
- super(UpstreamECMPFlow, self).__init__(in_port, group=group)
+ def __init__(self, in_port, group, priority=None):
+ super(UpstreamECMPFlow, self).__init__(in_port, group=group, priority=priority)
def set_upper_tor_forwarding_state(self, state):
self.group.set_upper_tor_forwarding_state(state)
@@ -361,17 +365,17 @@ def _init_flows(self):
self._del_flows()
self._del_groups()
# downstream flows
- self._add_flow(self.upper_tor_port, output_ports=[self.ptf_port, self.server_nic])
- self._add_flow(self.lower_tor_port, output_ports=[self.ptf_port, self.server_nic])
+ self._add_flow(self.upper_tor_port, output_ports=[self.ptf_port, self.server_nic], priority=10)
+ self._add_flow(self.lower_tor_port, output_ports=[self.ptf_port, self.server_nic], priority=10)
# upstream flows
# upstream packet from server NiC should be directed to both ToRs
- self._add_flow(self.server_nic, output_ports=[self.upper_tor_port, self.lower_tor_port])
+ self._add_flow(self.server_nic, output_ports=[self.upper_tor_port, self.lower_tor_port], priority=9)
# upstream icmp packet from ptf port should be directed to both ToRs
- self._add_flow(self.ptf_port, packet_filter="icmp", output_ports=[self.upper_tor_port, self.lower_tor_port])
+ self._add_flow(self.ptf_port, packet_filter="icmp", output_ports=[self.upper_tor_port, self.lower_tor_port], priority=8)
# upstream packet from ptf port should be ECMP directed to active ToRs
self.upstream_ecmp_group = self._add_upstream_ecmp_group(1, self.upper_tor_port, self.lower_tor_port)
- self.upstream_ecmp_flow = self._add_upstream_ecmp_flow(self.ptf_port, self.upstream_ecmp_group)
+ self.upstream_ecmp_flow = self._add_upstream_ecmp_flow(self.ptf_port, self.upstream_ecmp_group, priority=7)
def _get_ports(self):
result = OVSCommand.ovs_vsctl_list_ports(self.bridge_name)
@@ -387,8 +391,8 @@ def _del_groups(self):
self.upstream_ecmp_group = None
self.groups.clear()
- def _add_flow(self, in_port, packet_filter=None, output_ports=[], group=None):
- flow = OVSFlow(in_port, packet_filter=packet_filter, output_ports=output_ports, group=group)
+ def _add_flow(self, in_port, packet_filter=None, output_ports=[], group=None, priority=None):
+ flow = OVSFlow(in_port, packet_filter=packet_filter, output_ports=output_ports, group=group, priority=priority)
logging.info("Add flow to bridge %s: %s", self.bridge_name, flow)
OVSCommand.ovs_ofctl_add_flow(self.bridge_name, flow)
self.flows.append(flow)
@@ -401,8 +405,8 @@ def _add_upstream_ecmp_group(self, group_id, upper_tor_port, lower_tor_port):
self.groups.append(group)
return group
- def _add_upstream_ecmp_flow(self, in_port, group):
- flow = UpstreamECMPFlow(in_port, group)
+ def _add_upstream_ecmp_flow(self, in_port, group, priority=None):
+ flow = UpstreamECMPFlow(in_port, group, priority=priority)
logging.info("Add upstream ecmp flow to bridge %s: %s", self.bridge_name, flow)
OVSCommand.ovs_ofctl_add_flow(self.bridge_name, flow)
self.flows.append(flow)
From 6bfca4404e711ad3e0f938b2dcdd80ab3e039e5a Mon Sep 17 00:00:00 2001
From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com>
Date: Sun, 29 May 2022 17:40:21 +0800
Subject: [PATCH 10/23] Print new core dump filename under /var/core post test
(#5716)
* Print new core dump filename under /var/core post test
What is the motivation for this PR?
Provide a more convenient view of new core dumps.
How did you do it?
Print new core dump filename.
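The counting comparison is replaced with a set difference over filenames; the core of it, with hypothetical filenames:
```
pre_existing_cores = ["core.1000", "python.core"]
cur_cores = ["core.1000", "python.core", "core.2000"]

# names, not counts: the difference directly yields the new core dump files
new_core_dumps = list(set(cur_cores) - set(pre_existing_cores))
assert new_core_dumps == ["core.2000"]
```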
How did you verify/test it?
Run a test
---
tests/conftest.py | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/tests/conftest.py b/tests/conftest.py
index d7f4238544e..94625436a3c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1568,18 +1568,23 @@ def collect_db_dump(request, duthosts):
@pytest.fixture(scope="module", autouse=True)
def verify_new_core_dumps(duthost):
if "20191130" in duthost.os_version:
- pre_existing_cores = duthost.shell('ls /var/core/ | grep -v python | wc -l')['stdout']
+ pre_existing_cores = duthost.shell('ls /var/core/ | grep -v python')['stdout'].split()
else:
- pre_existing_cores = duthost.shell('ls /var/core/ | wc -l')['stdout']
-
+ pre_existing_cores = duthost.shell('ls /var/core/')['stdout'].split()
+
yield
if "20191130" in duthost.os_version:
- coredumps_count = duthost.shell('ls /var/core/ | grep -v python | wc -l')['stdout']
+ cur_cores = duthost.shell('ls /var/core/ | grep -v python')['stdout'].split()
else:
- coredumps_count = duthost.shell('ls /var/core/ | wc -l')['stdout']
- if int(coredumps_count) > int(pre_existing_cores):
- pytest.fail("Core dumps found. Expected: {} Found: {}. Test failed".format(pre_existing_cores,\
- coredumps_count))
+ cur_cores = duthost.shell('ls /var/core/')['stdout'].split()
+
+ new_core_dumps = set(cur_cores) - set(pre_existing_cores)
+ # convert to list so print msg will not contain "set()"
+ new_core_dumps = list(new_core_dumps)
+
+ if new_core_dumps:
+ pytest.fail("Core dumps found. Expected: %s Found: %s. Test failed. New core dumps: %s" % (len(pre_existing_cores),\
+ len(cur_cores), new_core_dumps))
def verify_packets_any_fixed(test, pkt, ports=[], device_number=0):
"""
From 8c7010cdfcfd5f174a185292db51bf8f6fc94736 Mon Sep 17 00:00:00 2001
From: Ze Gan
Date: Sun, 29 May 2022 20:02:50 -0700
Subject: [PATCH 11/23] [macsec]: Override dp_poll of ptf by MACsec's (#5490)
Summary:
Override ptf's dp_poll with MACsec's dp_poll so that the dataplane test cases can run in a MACsec environment.
What is the motivation for this PR?
Existing dataplane tests cannot run in a MACsec environment because the traffic on a MACsec link is encrypted.
How did you do it?
I wrote a macsec_dp_poll to override the original ptf dp_poll; for each MACsec packet, macsec_dp_poll decrypts it and passes the clear packet to the upper layer.
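The override pattern, sketched under the assumption that `ptf.testutils.dp_poll` is the hook being replaced (the full implementation is in the macsec.py diff below):
```
import ptf.testutils as testutils

__origin_dp_poll = testutils.dp_poll  # keep a reference to the stock poll

def macsec_dp_poll(test, device_number=0, port_number=None, timeout=None, exp_pkt=None):
    # poll without exp_pkt so MACsec frames are not rejected before decryption
    result = __origin_dp_poll(test, device_number=device_number,
                              port_number=port_number, timeout=timeout, exp_pkt=None)
    # decrypt result.packet here if it arrived on a MACsec port, then
    # compare the clear-text frame against exp_pkt
    return result

testutils.dp_poll = macsec_dp_poll
```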
Signed-off-by: Ze Gan
---
ansible/roles/test/files/ptftests/fib.py | 8 +-
ansible/roles/test/files/ptftests/fib_test.py | 10 +-
ansible/roles/test/files/ptftests/lpm.py | 5 +-
ansible/roles/test/files/ptftests/macsec.py | 76 +++++++++++++++
ansible/roles/test/files/ptftests/py3/fib.py | 1 +
.../roles/test/files/ptftests/py3/fib_test.py | 1 +
.../files/ptftests/{ => py3}/hash_test.py | 9 +-
ansible/roles/test/files/ptftests/py3/lpm.py | 1 +
.../roles/test/files/ptftests/py3/macsec.py | 1 +
ansible/roles/test/files/ptftests/remote.py | 2 +-
tests/common/devices/ptf.py | 29 +++++-
tests/common/plugins/ptfadapter/__init__.py | 14 ++-
tests/conftest.py | 4 +-
tests/fib/test_fib.py | 8 +-
tests/macsec/macsec_helper.py | 96 +++++++++++++------
tests/macsec/test_macsec.py | 18 ++--
tests/ptf_runner.py | 4 +
17 files changed, 220 insertions(+), 67 deletions(-)
create mode 100644 ansible/roles/test/files/ptftests/macsec.py
create mode 120000 ansible/roles/test/files/ptftests/py3/fib.py
create mode 120000 ansible/roles/test/files/ptftests/py3/fib_test.py
rename ansible/roles/test/files/ptftests/{ => py3}/hash_test.py (98%)
create mode 120000 ansible/roles/test/files/ptftests/py3/lpm.py
create mode 120000 ansible/roles/test/files/ptftests/py3/macsec.py
diff --git a/ansible/roles/test/files/ptftests/fib.py b/ansible/roles/test/files/ptftests/fib.py
index 5686bf516c0..0e2fc8aa8dd 100644
--- a/ansible/roles/test/files/ptftests/fib.py
+++ b/ansible/roles/test/files/ptftests/fib.py
@@ -1,4 +1,6 @@
import re
+import six
+
from ipaddress import ip_address, ip_network
from lpm import LpmDict
@@ -58,7 +60,7 @@ def __init__(self, file_path):
for line in f.readlines():
if pattern.match(line): continue
entry = line.split(' ', 1)
- prefix = ip_network(unicode(entry[0]))
+ prefix = ip_network(six.text_type(entry[0]))
next_hop = self.NextHop(entry[1])
if prefix.version is 4:
self._ipv4_lpm_dict[str(prefix)] = next_hop
@@ -66,14 +68,14 @@ def __init__(self, file_path):
self._ipv6_lpm_dict[str(prefix)] = next_hop
def __getitem__(self, ip):
- ip = ip_address(unicode(ip))
+ ip = ip_address(six.text_type(ip))
if ip.version is 4:
return self._ipv4_lpm_dict[str(ip)]
elif ip.version is 6:
return self._ipv6_lpm_dict[str(ip)]
def __contains__(self, ip):
- ip_obj = ip_address(unicode(ip))
+ ip_obj = ip_address(six.text_type(ip))
if ip_obj.version == 4:
return self._ipv4_lpm_dict.contains(ip)
elif ip_obj.version == 6:
diff --git a/ansible/roles/test/files/ptftests/fib_test.py b/ansible/roles/test/files/ptftests/fib_test.py
index e03dea22ade..28353f2f0af 100644
--- a/ansible/roles/test/files/ptftests/fib_test.py
+++ b/ansible/roles/test/files/ptftests/fib_test.py
@@ -32,6 +32,7 @@
from ptf.testutils import verify_no_packet_any
import fib
+import macsec
class FibTest(BaseTest):
'''
@@ -175,6 +176,11 @@ def get_src_and_exp_ports(self, dst_ip):
exp_port_list = next_hop.get_next_hop_list()
if src_port in exp_port_list:
continue
+ # A MACsec link only receives encrypted packets.
+ # It's hard to simulate encrypted packets on an injected port,
+ # because MACsec is a session-based channel while the injected ports are stateless.
+ if src_port in macsec.MACSEC_INFOS.keys():
+ continue
logging.info('src_port={}, exp_port_list={}, active_dut_index={}'.format(src_port, exp_port_list, active_dut_index))
break
return src_port, exp_port_list, next_hop
@@ -183,9 +189,9 @@ def check_ip_range(self, ip_range, dut_index, ipv4=True):
dst_ips = []
dst_ips.append(ip_range.get_first_ip())
- if ip_range.length > 1:
+ if ip_range.length() > 1:
dst_ips.append(ip_range.get_last_ip())
- if ip_range.length > 2:
+ if ip_range.length() > 2:
dst_ips.append(ip_range.get_random_ip())
for dst_ip in dst_ips:
diff --git a/ansible/roles/test/files/ptftests/lpm.py b/ansible/roles/test/files/ptftests/lpm.py
index 609613d4eea..941c058aa75 100644
--- a/ansible/roles/test/files/ptftests/lpm.py
+++ b/ansible/roles/test/files/ptftests/lpm.py
@@ -1,4 +1,5 @@
import random
+import six
from ipaddress import ip_address, ip_network
from SubnetTree import SubnetTree
@@ -62,7 +63,7 @@ def __init__(self, ipv4=True):
self._boundaries = { ip_address(u'0.0.0.0') : 1} if ipv4 else { ip_address(u'::') : 1}
def __setitem__(self, key, value):
- prefix = ip_network(unicode(key))
+ prefix = ip_network(six.text_type(key))
# add the current key to self._prefix_set only when it is not the default route and it is not a duplicate key
if prefix.prefixlen and key not in self._prefix_set:
boundary = prefix[0]
@@ -78,7 +79,7 @@ def __getitem__(self, key):
def __delitem__(self, key):
if '/0' not in key:
- prefix = ip_network(unicode(key))
+ prefix = ip_network(six.text_type(key))
boundary = prefix[0]
next_boundary = prefix[-1] + 1
self._boundaries[boundary] = self._boundaries.get(boundary) - 1
diff --git a/ansible/roles/test/files/ptftests/macsec.py b/ansible/roles/test/files/ptftests/macsec.py
new file mode 100644
index 00000000000..aec169a9ea5
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/macsec.py
@@ -0,0 +1,76 @@
+import os
+import pickle
+import cryptography.exceptions
+import time
+
+import ptf
+import scapy.all as scapy
+MACSEC_SUPPORTED = False
+if hasattr(scapy, "VERSION") and tuple(map(int, scapy.VERSION.split('.'))) >= (2, 4, 5):
+ MACSEC_SUPPORTED = True
+if MACSEC_SUPPORTED:
+ import scapy.contrib.macsec as scapy_macsec
+
+MACSEC_INFO_FILE = "macsec_info.pickle"
+
+MACSEC_INFOS = {}
+
+
+def __decap_macsec_pkt(macsec_pkt, sci, an, sak, encrypt, send_sci, pn, xpn_en=False, ssci=None, salt=None):
+ sa = scapy_macsec.MACsecSA(sci=sci,
+ an=an,
+ pn=pn,
+ key=sak,
+ icvlen=16,
+ encrypt=encrypt,
+ send_sci=send_sci,
+ xpn_en=xpn_en,
+ ssci=ssci,
+ salt=salt)
+ try:
+ pkt = sa.decrypt(macsec_pkt)
+ except cryptography.exceptions.InvalidTag:
+ # Invalid MACsec packets
+ return None
+ pkt = sa.decap(pkt)
+ return pkt
+
+
+def __macsec_dp_poll(test, device_number=0, port_number=None, timeout=None, exp_pkt=None):
+ recent_packets = []
+ packet_count = 0
+ if timeout is None:
+ timeout = ptf.ptfutils.default_timeout
+ while True:
+ start_time = time.time()
+ ret = __origin_dp_poll(
+ test, device_number=device_number, port_number=port_number, timeout=timeout, exp_pkt=None)
+ timeout -= time.time() - start_time
+ # The device number of the PTF host is 0; if the target port isn't an injected port (belonging to the PTF host), no further MACsec handling is needed.
+ if isinstance(ret, test.dataplane.PollFailure) or exp_pkt is None or ret.device != 0:
+ return ret
+ pkt = scapy.Ether(ret.packet)
+ if pkt[scapy.Ether].type != 0x88e5:
+ if ptf.dataplane.match_exp_pkt(exp_pkt, pkt):
+ return ret
+ else:
+ continue
+ if ret.port in MACSEC_INFOS and MACSEC_INFOS[ret.port]:
+ encrypt, send_sci, xpn_en, sci, an, sak, ssci, salt = MACSEC_INFOS[ret.port]
+ pkt = __decap_macsec_pkt(pkt, sci, an, sak, encrypt,
+ send_sci, 0, xpn_en, ssci, salt)
+ if pkt is not None and ptf.dataplane.match_exp_pkt(exp_pkt, pkt):
+ return ret
+ recent_packets.append(pkt)
+ packet_count += 1
+ if timeout <= 0:
+ break
+ return test.dataplane.PollFailure(exp_pkt, recent_packets, packet_count)
+
+
+if MACSEC_SUPPORTED and os.path.exists(MACSEC_INFO_FILE):
+ with open(MACSEC_INFO_FILE, "rb") as f:
+ MACSEC_INFOS = pickle.load(f, encoding="bytes")
+ if MACSEC_INFOS:
+ __origin_dp_poll = ptf.testutils.dp_poll
+ ptf.testutils.dp_poll = __macsec_dp_poll
diff --git a/ansible/roles/test/files/ptftests/py3/fib.py b/ansible/roles/test/files/ptftests/py3/fib.py
new file mode 120000
index 00000000000..c01d039da72
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/py3/fib.py
@@ -0,0 +1 @@
+../fib.py
\ No newline at end of file
diff --git a/ansible/roles/test/files/ptftests/py3/fib_test.py b/ansible/roles/test/files/ptftests/py3/fib_test.py
new file mode 120000
index 00000000000..7b2510b00ab
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/py3/fib_test.py
@@ -0,0 +1 @@
+../fib_test.py
\ No newline at end of file
diff --git a/ansible/roles/test/files/ptftests/hash_test.py b/ansible/roles/test/files/ptftests/py3/hash_test.py
similarity index 98%
rename from ansible/roles/test/files/ptftests/hash_test.py
rename to ansible/roles/test/files/ptftests/py3/hash_test.py
index 6eab2447d06..1d3e93f48a8 100644
--- a/ansible/roles/test/files/ptftests/hash_test.py
+++ b/ansible/roles/test/files/ptftests/py3/hash_test.py
@@ -9,6 +9,7 @@
import random
import json
import time
+import six
from ipaddress import ip_address, ip_network
@@ -66,8 +67,8 @@ def setUp(self):
with open(ptf_test_port_map) as f:
self.ptf_test_port_map = json.load(f)
- self.src_ip_range = [unicode(x) for x in self.test_params['src_ip_range'].split(',')]
- self.dst_ip_range = [unicode(x) for x in self.test_params['dst_ip_range'].split(',')]
+ self.src_ip_range = [six.text_type(x) for x in self.test_params['src_ip_range'].split(',')]
+ self.dst_ip_range = [six.text_type(x) for x in self.test_params['dst_ip_range'].split(',')]
self.src_ip_interval = lpm.LpmDict.IpInterval(ip_address(self.src_ip_range[0]), ip_address(self.src_ip_range[1]))
self.dst_ip_interval = lpm.LpmDict.IpInterval(ip_address(self.dst_ip_range[0]), ip_address(self.dst_ip_range[1]))
self.vlan_ids = self.test_params.get('vlan_ids', [])
@@ -81,7 +82,7 @@ def setUp(self):
self.single_fib = self.test_params.get('single_fib_for_duts', False)
# set the base mac here to make it persistent across calls of check_ip_route
- self.base_mac = self.dataplane.get_mac(*random.choice(self.dataplane.ports.keys()))
+ self.base_mac = self.dataplane.get_mac(*random.choice(list(self.dataplane.ports.keys())))
def get_src_and_exp_ports(self, dst_ip):
while True:
@@ -155,7 +156,7 @@ def check_hash(self, hash_key):
self.check_balancing(next_hop.get_next_hop(), hit_count_map)
def check_ip_route(self, hash_key, src_port, dst_ip, dst_port_list):
- if ip_network(unicode(dst_ip)).version == 4:
+ if ip_network(six.text_type(dst_ip)).version == 4:
(matched_index, received) = self.check_ipv4_route(hash_key, src_port, dst_port_list)
else:
(matched_index, received) = self.check_ipv6_route(hash_key, src_port, dst_port_list)
diff --git a/ansible/roles/test/files/ptftests/py3/lpm.py b/ansible/roles/test/files/ptftests/py3/lpm.py
new file mode 120000
index 00000000000..865ef8c3395
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/py3/lpm.py
@@ -0,0 +1 @@
+../lpm.py
\ No newline at end of file
diff --git a/ansible/roles/test/files/ptftests/py3/macsec.py b/ansible/roles/test/files/ptftests/py3/macsec.py
new file mode 120000
index 00000000000..f8db57172ed
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/py3/macsec.py
@@ -0,0 +1 @@
+../macsec.py
\ No newline at end of file
diff --git a/ansible/roles/test/files/ptftests/remote.py b/ansible/roles/test/files/ptftests/remote.py
index dc74e9e2201..e3cb3ce9a0f 100644
--- a/ansible/roles/test/files/ptftests/remote.py
+++ b/ansible/roles/test/files/ptftests/remote.py
@@ -39,7 +39,7 @@ def build_ifaces_map(ifaces):
constants_file = os.path.join(os.path.dirname(__file__), "constants.yaml")
if os.path.exists(constants_file):
with open(constants_file) as fd:
- constants = yaml.load(fd)
+ constants = yaml.safe_load(fd)
ptf_port_mapping_mode = constants.get("PTF_PORT_MAPPING_MODE", ptf_port_mapping_mode)
diff --git a/tests/common/devices/ptf.py b/tests/common/devices/ptf.py
index 6d92178f38f..61de8a4683d 100644
--- a/tests/common/devices/ptf.py
+++ b/tests/common/devices/ptf.py
@@ -1,7 +1,15 @@
+import logging
+import pickle
+import tempfile
+
from tests.common.devices.base import AnsibleHostBase
+from tests.macsec.macsec_helper import load_macsec_info
+
+logger = logging.getLogger(__name__)
CHANGE_MAC_ADDRESS_SCRIPT = "scripts/change_mac.sh"
REMOVE_IP_ADDRESS_SCRIPT = "scripts/remove_ip.sh"
+MACSEC_INFO_FILE = "macsec_info.pickle"
@@ -11,7 +19,10 @@ class PTFHost(AnsibleHostBase):
Instance of this class can run ansible modules on the PTF host.
"""
- def __init__(self, ansible_adhoc, hostname):
+
+ def __init__(self, ansible_adhoc, hostname, duthost, tbinfo):
+ self.duthost = duthost
+ self.tbinfo = tbinfo
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
def change_mac_addresses(self):
@@ -20,4 +31,20 @@ def change_mac_addresses(self):
def remove_ip_addresses(self):
self.script(REMOVE_IP_ADDRESS_SCRIPT)
+ def create_macsec_info(self):
+ macsec_info = {}
+ for port_name, injected_port_id in self.duthost.get_extended_minigraph_facts(self.tbinfo)["minigraph_ptf_indices"].items():
+ try:
+ macsec_info[injected_port_id] = load_macsec_info(
+ self.duthost, port_name, force_reload=True)
+ except KeyError:
+ # A KeyError means MACsec isn't enabled on the specified port.
+ logging.info(
+ "MACsec isn't enabled on the port {}".format(port_name))
+ continue
+ tf = tempfile.NamedTemporaryFile(delete=True)
+ pickle.dump(macsec_info, tf)
+ tf.flush()
+ self.copy(src=tf.name, dest="/root/" + MACSEC_INFO_FILE)
+
# TODO: Add a method for running PTF script
diff --git a/tests/common/plugins/ptfadapter/__init__.py b/tests/common/plugins/ptfadapter/__init__.py
index 11493814f5f..1fc971323e1 100644
--- a/tests/common/plugins/ptfadapter/__init__.py
+++ b/tests/common/plugins/ptfadapter/__init__.py
@@ -39,18 +39,13 @@ def _send(test, port_id, pkt, count=1):
# Below code is to override the 'dp_poll' function in the ptf.testutils module. This function is called by all
# the other functions for receiving packets in the ptf.testutils module. Purpose of this overriding is to update
# the payload of received packet using the same method to match the updated injected packets.
+ origin_dp_poll = ptf.testutils.dp_poll
def _dp_poll(test, device_number=0, port_number=None, timeout=-1, exp_pkt=None):
update_payload = getattr(test, "update_payload", None)
if update_payload and callable(update_payload):
exp_pkt = test.update_payload(exp_pkt)
- result = test.dataplane.poll(
- device_number=device_number, port_number=port_number,
- timeout=timeout, exp_pkt=exp_pkt, filters=ptf.testutils.FILTERS
- )
- if isinstance(result, test.dataplane.PollSuccess):
- test.at_receive(result.packet, device_number=result.device, port_number=result.port)
- return result
+ return origin_dp_poll(test, device_number=device_number, port_number=port_number, timeout=timeout, exp_pkt=exp_pkt)
setattr(ptf.testutils, "dp_poll", _dp_poll)
@@ -102,7 +97,7 @@ def get_ifaces_map(ifaces, ptf_port_mapping_mode):
@pytest.fixture(scope='module')
-def ptfadapter(ptfhost, tbinfo, request):
+def ptfadapter(ptfhost, tbinfo, request, duthost):
"""return ptf test adapter object.
The fixture is module scope, because usually there is not need to
restart PTF nn agent and reinitialize data plane thread on every
@@ -156,6 +151,9 @@ def start_ptf_nn_agent():
node_id = request.module.__name__
adapter.payload_pattern = node_id + " "
+ adapter.duthost = duthost
+ adapter.mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
+
yield adapter
diff --git a/tests/conftest.py b/tests/conftest.py
index 94625436a3c..e18d1165423 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -386,12 +386,12 @@ def ptfhost(ansible_adhoc, tbinfo, duthost):
if "ptf_image_name" in tbinfo and "docker-keysight-api-server" in tbinfo["ptf_image_name"]:
return None
if "ptf" in tbinfo:
- return PTFHost(ansible_adhoc, tbinfo["ptf"])
+ return PTFHost(ansible_adhoc, tbinfo["ptf"], duthost, tbinfo)
else:
# when no ptf defined in testbed.csv
# try to parse it from inventory
ptf_host = duthost.host.options["inventory_manager"].get_host(duthost.hostname).get_vars()["ptf_host"]
- return PTFHost(ansible_adhoc, ptf_host)
+ return PTFHost(ansible_adhoc, ptf_host, duthost, tbinfo)
@pytest.fixture(scope="module")
diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py
index bd9697d558a..6bb8363eced 100644
--- a/tests/fib/test_fib.py
+++ b/tests/fib/test_fib.py
@@ -38,7 +38,7 @@
DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:FFFF:0:01::FFFF']
VLANIDS = range(1032, 1279)
VLANIP = '192.168.{}.1/24'
-PTF_QLEN = 2000
+PTF_QLEN = 20000
DEFAULT_MUX_SERVER_PORT = 8080
PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'
@@ -90,7 +90,8 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu,
"single_fib_for_duts": single_fib_for_duts},
log_file=log_file,
qlen=PTF_QLEN,
- socket_recv_size=16384)
+ socket_recv_size=16384,
+ is_python3=True)
def get_vlan_untag_ports(duthosts, duts_running_config_facts):
@@ -275,4 +276,5 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s
},
log_file=log_file,
qlen=PTF_QLEN,
- socket_recv_size=16384)
+ socket_recv_size=16384,
+ is_python3=True)
diff --git a/tests/macsec/macsec_helper.py b/tests/macsec/macsec_helper.py
index 89effce71b6..614e0ab88eb 100644
--- a/tests/macsec/macsec_helper.py
+++ b/tests/macsec/macsec_helper.py
@@ -1,9 +1,12 @@
+from collections import defaultdict
import struct
import sys
import binascii
import time
+import re
import cryptography.exceptions
+import ptf
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
@@ -29,7 +32,7 @@
def check_wpa_supplicant_process(host, ctrl_port_name):
- cmd = "ps aux | grep 'wpa_supplicant' | grep '{}' | grep -v 'grep'".format(
+ cmd = "ps aux | grep -w 'wpa_supplicant' | grep -w '{}' | grep -v 'grep'".format(
ctrl_port_name)
output = host.shell(cmd)["stdout_lines"]
assert len(output) == 1, "The wpa_supplicant for the port {} wasn't started on the host {}".format(
@@ -303,36 +306,69 @@ def decap_macsec_pkt(macsec_pkt, sci, an, sak, encrypt, send_sci, pn, xpn_en=Fal
return pkt
-def check_macsec_pkt(macsec_attr, test, ptf_port_id, exp_pkt, timeout=3):
+def check_macsec_pkt(test, ptf_port_id, exp_pkt, timeout=3):
device, ptf_port = testutils.port_to_tuple(ptf_port_id)
- received_packets = []
- encrypt, send_sci, xpn_en, sci, an, sak, ssci, salt = macsec_attr
- end_time = time.time() + timeout
+ ret = testutils.dp_poll(
+ test, device_number=device, port_number=ptf_port, timeout=timeout, exp_pkt=exp_pkt)
+ if isinstance(ret, test.dataplane.PollSuccess):
+ return
+ else:
+ return ret.format()
+
+
+def find_portname_from_ptf_id(mg_facts, ptf_id):
+ for k, v in mg_facts["minigraph_ptf_indices"].items():
+ if ptf_id == v:
+ return k
+ return None
+
+
+def load_macsec_info(duthost, port, force_reload=None):
+ if force_reload or port not in __macsec_infos:
+ __macsec_infos[port] = get_macsec_attr(duthost, port)
+ return __macsec_infos[port]
+
+
+def macsec_dp_poll(test, device_number=0, port_number=None, timeout=None, exp_pkt=None):
+ recent_packets = []
+ packet_count = 0
+ if timeout is None:
+ timeout = ptf.ptfutils.default_timeout
+ force_reload = defaultdict(lambda: False)
+ if hasattr(test, "force_reload_macsec"):
+ force_reload = defaultdict(lambda: test.force_reload_macsec)
while True:
- cur_time = time.time()
- if cur_time > end_time:
- break
- ret = testutils.dp_poll(
- test, device_number=device, port_number=ptf_port, timeout=end_time - cur_time, exp_pkt=None)
- if isinstance(ret, test.dataplane.PollFailure):
- break
- # If the packet isn't MACsec type
+ start_time = time.time()
+ ret = __origin_dp_poll(
+ test, device_number=device_number, port_number=port_number, timeout=timeout, exp_pkt=None)
+ timeout -= time.time() - start_time
+ # The device number of the PTF host is 0; if the target port isn't an injected port (belonging to the PTF host), no further MACsec handling is needed.
+ if ret.device != 0 \
+ or isinstance(ret, test.dataplane.PollFailure) \
+ or exp_pkt is None:
+ return ret
pkt = scapy.Ether(ret.packet)
if pkt[scapy.Ether].type != 0x88e5:
- continue
- received_packets.append(pkt)
- for i in range(len(received_packets)):
- pkt = received_packets[i]
- pn = 0
- pkt = decap_macsec_pkt(pkt, sci, an, sak, encrypt,
- send_sci, pn, xpn_en, ssci, salt)
- if not pkt:
- continue
- received_packets[i] = pkt
- if exp_pkt.pkt_match(pkt):
- return
- fail_message = "Expect pkt \n{}\n{}\nBut received \n".format(
- exp_pkt, exp_pkt.exp_pkt.show(dump=True))
- for packet in received_packets:
- fail_message += "\n{}\n".format(packet.show(dump=True))
- return fail_message
+ if ptf.dataplane.match_exp_pkt(exp_pkt, pkt):
+ return ret
+ else:
+ continue
+ macsec_info = load_macsec_info(test.duthost, find_portname_from_ptf_id(test.mg_facts, ret.port), force_reload[ret.port])
+ if macsec_info:
+ encrypt, send_sci, xpn_en, sci, an, sak, ssci, salt = macsec_info
+ force_reload[ret.port] = False
+ pkt = decap_macsec_pkt(pkt, sci, an, sak, encrypt,
+ send_sci, 0, xpn_en, ssci, salt)
+ if pkt is not None and ptf.dataplane.match_exp_pkt(exp_pkt, pkt):
+ return ret
+ recent_packets.append(pkt)
+ packet_count += 1
+ if timeout <= 0:
+ break
+ return test.dataplane.PollFailure(exp_pkt, recent_packets, packet_count)
+
+
+__origin_dp_poll = testutils.dp_poll
+__macsec_infos = defaultdict(lambda: None)
+testutils.dp_poll = macsec_dp_poll
+
diff --git a/tests/macsec/test_macsec.py b/tests/macsec/test_macsec.py
index 925202941f7..909de6c259d 100644
--- a/tests/macsec/test_macsec.py
+++ b/tests/macsec/test_macsec.py
@@ -91,12 +91,14 @@ def test_rekey_by_period(self, duthost, ctrl_links, upstream_links, rekey_period
class TestDataPlane():
BATCH_COUNT = 10
- def test_server_to_neighbor(self, duthost, ctrl_links, downstream_links, upstream_links, nbr_device_numbers, nbr_ptfadapter):
- nbr_ptfadapter.dataplane.set_qlen(TestDataPlane.BATCH_COUNT * 10)
+ def test_server_to_neighbor(self, duthost, ctrl_links, downstream_links, upstream_links, ptfadapter):
+ ptfadapter.dataplane.set_qlen(TestDataPlane.BATCH_COUNT * 10)
down_link = downstream_links.values()[0]
dut_macaddress = duthost.get_dut_iface_mac(ctrl_links.keys()[0])
+ setattr(ptfadapter, "force_reload_macsec", True)
+
for portchannel in get_portchannel(duthost).values():
members = portchannel["members"]
@@ -128,18 +130,12 @@ def test_server_to_neighbor(self, duthost, ctrl_links, downstream_links, upstrea
"00:01:02:03:04:05", dut_macaddress, "1.2.3.4", up_host_ip, bytes(payload))
exp_pkt = create_exp_pkt(pkt, pkt[scapy.IP].ttl - 1)
- testutils.send_packet(
- nbr_ptfadapter, down_link["ptf_port_id"], pkt, TestDataPlane.BATCH_COUNT)
- testutils.verify_packet_any_port(
- nbr_ptfadapter, exp_pkt, ports=peer_ports, device_number=nbr_device_numbers[up_host_name], timeout=3)
-
fail_message = ""
for port_name in members:
up_link = upstream_links[port_name]
- macsec_attr = get_macsec_attr(duthost, port_name)
testutils.send_packet(
- nbr_ptfadapter, down_link["ptf_port_id"], pkt, TestDataPlane.BATCH_COUNT)
- result = check_macsec_pkt(macsec_attr=macsec_attr, test=nbr_ptfadapter,
+ ptfadapter, down_link["ptf_port_id"], pkt, TestDataPlane.BATCH_COUNT)
+ result = check_macsec_pkt(test=ptfadapter,
ptf_port_id=up_link["ptf_port_id"], exp_pkt=exp_pkt, timeout=3)
if result is None:
return
@@ -152,7 +148,7 @@ def test_dut_to_neighbor(self, duthost, ctrl_links, upstream_links):
"ping -c {} {}".format(4, up_link['local_ipv4_addr']))
assert not ret['failed']
- def test_neighbor_to_neighbor(self, duthost, ctrl_links, upstream_links, nbr_device_numbers, nbr_ptfadapter):
+ def test_neighbor_to_neighbor(self, duthost, ctrl_links, upstream_links, nbr_device_numbers):
portchannels = get_portchannel(duthost).values()
for i in range(len(portchannels)):
assert portchannels[i]["members"]
diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py
index f5589e3c260..bdb3721e160 100644
--- a/tests/ptf_runner.py
+++ b/tests/ptf_runner.py
@@ -64,6 +64,10 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={},
if timeout:
cmd += " --test-case-timeout {}".format(int(timeout))
+ # MACsec is only available in Python3
+ if is_python3:
+ host.create_macsec_info()
+
try:
result = host.shell(cmd, chdir="/root", module_ignore_errors=module_ignore_errors)
if module_ignore_errors:
From 51092b93a58fe114e661e9af96793b204433d8ef Mon Sep 17 00:00:00 2001
From: Anton Ptashnik
Date: Mon, 30 May 2022 08:30:27 +0300
Subject: [PATCH 12/23] fixed cleanup for test_default_cfg_after_load_mg:
disabled pfcwd after the test execution (#5392)
What is the motivation for this PR?
Cleanup PFCWD after test_default_cfg_after_load_mg test
How did you do it?
Disabled the feature in both the running config and the permanent config.
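The running-config cleanup uses pytest's yield-fixture teardown; a sketch of the pattern, mirroring the `stop_pfcwd` change in the diff below:
```
# Sketch: placing cleanup after `yield` turns it into fixture teardown,
# so pfcwd is stopped after the test instead of before it.
import pytest

@pytest.fixture(scope='module', autouse=True)
def stop_pfcwd(duthosts, rand_one_dut_hostname):
    yield  # run the tests first
    duthost = duthosts[rand_one_dut_hostname]
    duthost.command("pfcwd stop")
```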
How did you verify/test it?
py.test --inventory=../ansible/lab,../ansible/veos --testbed_file=../ansible/testbed.csv --module-path=../ansible/library -v -rA --topology=t1,any pfcwd/test_pfc_config.py -k test_default_cfg_after_load_mg
---
tests/pfcwd/conftest.py | 3 ---
tests/pfcwd/test_pfc_config.py | 16 +++++++++++-----
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py
index 3126d00cc8a..311627d89bc 100644
--- a/tests/pfcwd/conftest.py
+++ b/tests/pfcwd/conftest.py
@@ -165,6 +165,3 @@ def setup_pfc_test(
# set poll interval
duthost.command("pfcwd interval {}".format(setup_info['pfc_timers']['pfc_wd_poll_time']))
yield setup_info
-
- logger.info("--- Starting Pfcwd ---")
- duthost.command("pfcwd start_default")
diff --git a/tests/pfcwd/test_pfc_config.py b/tests/pfcwd/test_pfc_config.py
index 46cb7145adc..691d734d335 100644
--- a/tests/pfcwd/test_pfc_config.py
+++ b/tests/pfcwd/test_pfc_config.py
@@ -115,9 +115,13 @@ def cfg_setup(setup_pfc_test, duthosts, rand_one_dut_hostname):
logger.info("--- Clean up config dir from DUT ---")
cfg_teardown(duthost)
-def update_init_cfg_file(duthost, default_pfcwd_value):
+
+def update_pfcwd_default_state(duthost, filepath, default_pfcwd_value):
"""
- Set default_pfcwd_status in /etc/sonic/init_cfg.json with parameter default_pfcwd_value
+ Set default_pfcwd_status in the specified file with parameter default_pfcwd_value
+ The path is expected to be one of:
+ - /etc/sonic/init_cfg.json
+ - /etc/sonic/config_db.json
Args:
duthost (AnsibleHost): instance
@@ -133,7 +137,7 @@ def update_init_cfg_file(duthost, default_pfcwd_value):
else:
pytest.fail("There is no default_pfcwd_status in /etc/sonic/init_cfg.json.")
- sed_command = "sed -i \'s/\"default_pfcwd_status\": \"{}\"/\"default_pfcwd_status\": \"{}\"/g\' /etc/sonic/init_cfg.json".format(original_value, default_pfcwd_value)
+ sed_command = "sed -i \'s/\"default_pfcwd_status\": \"{}\"/\"default_pfcwd_status\": \"{}\"/g\' {}".format(original_value, default_pfcwd_value, filepath)
duthost.shell(sed_command)
return original_value
@@ -149,7 +153,8 @@ def mg_cfg_teardown(duthost, default_pfcwd_value):
Returns:
None
"""
- update_init_cfg_file(duthost, default_pfcwd_value)
+ update_pfcwd_default_state(duthost, '/etc/sonic/init_cfg.json', default_pfcwd_value)
+ update_pfcwd_default_state(duthost, '/etc/sonic/config_db.json', default_pfcwd_value)
@pytest.fixture(scope='class', autouse=True)
def mg_cfg_setup(duthosts, rand_one_dut_hostname):
@@ -167,7 +172,7 @@ def mg_cfg_setup(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
logger.info("Enable pfcwd in configuration file")
- original_pfcwd_value = update_init_cfg_file(duthost, "enable")
+ original_pfcwd_value = update_pfcwd_default_state(duthost, "/etc/sonic/init_cfg.json", "enable")
yield
logger.info("--- Start running default pfcwd config test---")
@@ -186,6 +191,7 @@ def stop_pfcwd(duthosts, rand_one_dut_hostname):
Returns:
None
"""
+ yield
duthost = duthosts[rand_one_dut_hostname]
logger.info("--- Stop Pfcwd --")
duthost.command("pfcwd stop")
From 74251aabc3a45bbd441296c1b6da439ee98e48d8 Mon Sep 17 00:00:00 2001
From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com>
Date: Mon, 30 May 2022 14:21:13 +0800
Subject: [PATCH 13/23] Add a new category 'asic_gen' in dut basic facts.
(#5721)
Description of PR
Test cases under tests/sub_port_interfaces only support the TD2 ASIC generation, so we add a new category 'asic_gen' to the DUT basic facts to skip unsupported ASICs in advance.
What is the motivation for this PR?
Test cases under tests/sub_port_interfaces only support the TD2 ASIC generation, so we add a new category 'asic_gen' to the DUT basic facts to skip unsupported ASICs in advance.
How did you do it?
Get the ASIC generation information from ansible/group_vars/sonic/variables and add it to the DUT basic facts. Also, modify the conditions in tests_mark_conditions.yaml to skip those cases.
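As an illustration, the lookup amounts to the sketch below; the variables-file keys and hwskus shown are assumptions for the example (the real file groups hwskus under vendor/ASIC keys), not the file's actual contents:
```
# Hypothetical sketch of the hwsku -> asic_gen lookup; key names and hwskus
# below are illustrative assumptions, not the real variables file.
import yaml

variables_yaml = """
broadcom_td2_hwskus: [Force10-S6000, Arista-7050QX32S-Q32]
broadcom_th_hwskus: [Arista-7060CX-32S-C32]
"""

def asic_gen(hwsku):
    asic_name = yaml.safe_load(variables_yaml)
    for name, hwskus in asic_name.items():
        # Only vendor_<gen>_hwskus keys for td*/th* generations are relevant.
        if ('td' in name or 'th' in name) and hwsku in hwskus:
            return name.split('_')[1]  # 'broadcom_td2_hwskus' -> 'td2'
    return "unknown"

print(asic_gen("Force10-S6000"))  # td2
```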
How did you verify/test it?
Ran the test cases on both supported and unsupported ASICs.
Signed-off-by: Yutong Zhang
---
.../common/plugins/conditional_mark/README.md | 1 +
.../plugins/conditional_mark/__init__.py | 31 +++++++++++++++++++
.../tests_mark_conditions.yaml | 4 +--
tests/sub_port_interfaces/conftest.py | 14 ---------
4 files changed, 34 insertions(+), 16 deletions(-)
diff --git a/tests/common/plugins/conditional_mark/README.md b/tests/common/plugins/conditional_mark/README.md
index 01dac7fe518..f7f8086dd2c 100644
--- a/tests/common/plugins/conditional_mark/README.md
+++ b/tests/common/plugins/conditional_mark/README.md
@@ -134,6 +134,7 @@ Example variables can be used in condition string:
"asic_type": "vs",
"num_asic": 1,
"is_multi_asic": False,
+ "asic_gen": "td2"
}
```
diff --git a/tests/common/plugins/conditional_mark/__init__.py b/tests/common/plugins/conditional_mark/__init__.py
index f091dabf390..5beeb680854 100644
--- a/tests/common/plugins/conditional_mark/__init__.py
+++ b/tests/common/plugins/conditional_mark/__init__.py
@@ -19,6 +19,7 @@
logger = logging.getLogger(__name__)
DEFAULT_CONDITIONS_FILE = 'common/plugins/conditional_mark/tests_mark_conditions*.yaml'
+ASIC_NAME_PATH = '/../../../../ansible/group_vars/sonic/variables'
def pytest_addoption(parser):
"""Add options for the conditional mark plugin.
@@ -85,6 +86,35 @@ def load_conditions(session):
return conditions_list
+def read_asic_name(hwsku):
+ '''
+ Get asic generation name from file 'ansible/group_vars/sonic/variables'
+
+ Args:
+ hwsku (str): Dut hwsku name
+
+ Returns:
+ str or None: Return the asic generation name or None if something went wrong or nothing found in the file.
+
+ '''
+ asic_name_file = os.path.dirname(__file__) + ASIC_NAME_PATH
+ try:
+ with open(asic_name_file) as f:
+ asic_name = yaml.safe_load(f)
+ logger.info(asic_name)
+
+ for key in list(asic_name.keys()):
+ if ('td' not in key) and ('th' not in key):
+ asic_name.pop(key)
+
+ for name, hw in asic_name.items():
+ if hwsku in hw:
+ return name.split('_')[1]
+
+ return "unknown"
+
+ except IOError as e:
+ return None
def load_dut_basic_facts(session):
"""Run 'ansible -m dut_basic_facts' command to get some basic DUT facts.
@@ -124,6 +154,7 @@ def load_dut_basic_facts(session):
output_fields = raw_output.split('SUCCESS =>', 1)
if len(output_fields) >= 2:
results.update(json.loads(output_fields[1].strip())['ansible_facts']['dut_basic_facts'])
+ results['asic_gen'] = read_asic_name(results['hwsku'])
except Exception as e:
logger.error('Failed to load dut basic facts, exception: {}'.format(repr(e)))
diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml
index 6065a76b6a3..17808eae5a6 100644
--- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml
+++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml
@@ -313,9 +313,9 @@ ssh/test_ssh_stress.py::test_ssh_stress:
#######################################
sub_port_interfaces:
skip:
- reason: "sub port interfaces test is not yet supported on multi-ASIC platform"
+ reason: "Unsupported platform or asic"
conditions:
- - "is_multi_asic==True"
+ - "is_multi_asic==True or asic_gen not in ['td2']"
sub_port_interfaces/test_sub_port_interfaces.py::TestSubPorts::test_tunneling_between_sub_ports:
skip:
diff --git a/tests/sub_port_interfaces/conftest.py b/tests/sub_port_interfaces/conftest.py
index a98865f1a9e..201a3bf8da6 100644
--- a/tests/sub_port_interfaces/conftest.py
+++ b/tests/sub_port_interfaces/conftest.py
@@ -7,7 +7,6 @@
from tests.common import config_reload
from tests.common.helpers.assertions import pytest_assert as py_assert
-from tests.common.utilities import get_host_visible_vars
from tests.common.utilities import wait_until
from tests.common.ptf_agent_updater import PtfAgentUpdater
from tests.common import constants
@@ -53,19 +52,6 @@ def pytest_addoption(parser):
help="Max numbers of sub-ports for test_max_numbers_of_sub_ports test case",
)
-
-@pytest.fixture(scope='module', autouse=True)
-def skip_unsupported_asic_type(duthost):
- SUBPORT_UNSUPPORTED_ASIC_LIST = ["th2"]
- vendor = duthost.facts["asic_type"]
- hostvars = get_host_visible_vars(duthost.host.options['inventory'], duthost.hostname)
- for asic in SUBPORT_UNSUPPORTED_ASIC_LIST:
- vendorAsic = "{0}_{1}_hwskus".format(vendor, asic)
- if vendorAsic in hostvars.keys() and duthost.facts['hwsku'] in hostvars[vendorAsic]:
- pytest.skip(
- "Skipping test since subport is not supported on {0} {1} platforms".format(vendor, asic))
-
-
@pytest.fixture(params=['port', 'port_in_lag'])
def port_type(request):
"""Port type to test, could be either port or port-channel."""
From 652ebdbcd74df912ccda64465f8f8dff1938569a Mon Sep 17 00:00:00 2001
From: Zhaohui Sun <94606222+ZhaohuiS@users.noreply.github.com>
Date: Mon, 30 May 2022 20:08:14 +0800
Subject: [PATCH 14/23] try ssh again with ansible_altpassword when catching
AuthenticationException (#5731)
What is the motivation for this PR?
test_ssh_limit.py is not skipped on the master image, but it failed with AuthenticationException because the password for the master image is different from the one for the 202012 image.
How did you do it?
Try SSH again with ansible_altpassword when catching AuthenticationException.
Many test cases already use this method, for example:
tests/ssh/test_ssh_ciphers.py
duthost_console function in tests/conftest.py
ansible/roles/test/files/ptftests/device_connection.py
How did you verify/test it?
Run ssh/test_ssh_limit.py
Signed-off-by: Zhaohui Sun
---
tests/ssh/test_ssh_limit.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/tests/ssh/test_ssh_limit.py b/tests/ssh/test_ssh_limit.py
index e41e4457c80..6b4dc25ed80 100755
--- a/tests/ssh/test_ssh_limit.py
+++ b/tests/ssh/test_ssh_limit.py
@@ -55,9 +55,14 @@ def modify_templates(duthost, tacacs_creds, creds):
hwsku = duthost.facts["hwsku"]
type = get_device_type(duthost)
user = tacacs_creds['local_user']
-
- # Duthost shell not support run command with J2 template in command text.
- admin_session = ssh_connect_remote(dut_ip, creds['sonicadmin_user'], creds['sonicadmin_password'])
+
+ try:
+ # The duthost shell does not support running commands with J2 templates in the command text.
+ admin_session = ssh_connect_remote(dut_ip, creds['sonicadmin_user'], creds['sonicadmin_password'])
+ except paramiko.AuthenticationException:
+ # try ssh with ansible_altpassword again
+ sonic_admin_alt_password = duthost.host.options['variable_manager']._hostvars[duthost.hostname].get("ansible_altpassword")
+ admin_session = ssh_connect_remote(dut_ip, creds['sonicadmin_user'], sonic_admin_alt_password)
# Backup and change /usr/share/sonic/templates/pam_limits.j2
additional_content = "session required pam_limits.so"
From 0ef874f3c020f65a55501dfdfdde068a8edb6b67 Mon Sep 17 00:00:00 2001
From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com>
Date: Mon, 30 May 2022 22:51:48 +0800
Subject: [PATCH 15/23] [dualtor][icmp_responder] Fix icmp responder (#5735)
### Approach
#### What is the motivation for this PR?
Fix the issue that `icmp_responder` responds on at most 24 ports.
Signed-off-by: Longxiang Lyu
#### How did you do it?
For each thread, run an event loop that polls multiple ports when the number of ports exceeds 24.
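Concretely, the interfaces are bucketed round-robin over at most 24 worker threads; a standalone sketch of the distribution:
```
# Round-robin bucketing of interfaces over at most 24 sniffer threads.
ifaces = ["eth{}".format(i) for i in range(30)]

max_workers = 24 if len(ifaces) > 24 else len(ifaces)
sniffed_ifaces = [[] for _ in range(max_workers)]
for i, iface in enumerate(ifaces):
    sniffed_ifaces[i % max_workers].append(iface)

# 30 interfaces over 24 threads: six threads poll 2 ports, the rest poll 1.
print([len(bucket) for bucket in sniffed_ifaces])
```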
#### How did you verify/test it?
Ran `icmp_responder` and verified that every mux port is in a healthy state.
---
tests/scripts/icmp_responder.py | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/tests/scripts/icmp_responder.py b/tests/scripts/icmp_responder.py
index 1f29717aa06..3f16aa3e38d 100644
--- a/tests/scripts/icmp_responder.py
+++ b/tests/scripts/icmp_responder.py
@@ -29,7 +29,7 @@ class ICMPSniffer(object):
"""Sniff ICMP packets."""
TYPE_ECHO_REQUEST = 8
- def __init__(self, iface, request_handler=None, dst_mac=None):
+ def __init__(self, ifaces, request_handler=None, dst_mac=None):
"""
Init ICMP sniffer.
@@ -39,8 +39,9 @@ def __init__(self, iface, request_handler=None, dst_mac=None):
"""
self.sniff_sockets = []
self.iface_hwaddr = {}
- self.sniff_sockets.append(conf.L2socket(type=ETH_P_IP, iface=iface, filter="icmp"))
- self.iface_hwaddr[iface] = get_if_hwaddr(iface)
+ for iface in ifaces:
+ self.sniff_sockets.append(conf.L2socket(type=ETH_P_IP, iface=iface, filter="icmp"))
+ self.iface_hwaddr[iface] = get_if_hwaddr(iface)
self.request_handler = request_handler
self.dst_mac = dst_mac
@@ -66,7 +67,12 @@ def __call__(self):
ifaces = args.ifaces
dst_mac = args.dst_mac
- with ThreadPoolExecutor(max_workers=24) as executor:
- for iface in ifaces:
- icmp_sniffer = ICMPSniffer(iface, request_handler=respond_to_icmp_request, dst_mac=dst_mac)
+ max_workers = 24 if len(ifaces) > 24 else len(ifaces)
+ sniffed_ifaces = [[] for _ in range(max_workers)]
+ for i, iface in enumerate(ifaces):
+ sniffed_ifaces[i % max_workers].append(iface)
+
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
+ for ifaces in sniffed_ifaces:
+ icmp_sniffer = ICMPSniffer(ifaces, request_handler=respond_to_icmp_request, dst_mac=dst_mac)
executor.submit(icmp_sniffer)
From 54abadea1a924be241148efaef38eed24aeced38 Mon Sep 17 00:00:00 2001
From: jingwenxie
Date: Mon, 30 May 2022 17:00:15 -0700
Subject: [PATCH 16/23] [configlet] Add test_add_rack to KVM daily test (#5689)
Summary: Add test_add_rack to KVM daily test
What is the motivation for this PR?
Add test_add_rack to KVM daily test
How did you do it?
Add test_add_rack to KVM daily test
How did you verify/test it?
Run E2E test.
---
tests/configlet/test_add_rack.py | 15 +++++++++++++++
tests/kvmtest.sh | 1 +
2 files changed, 16 insertions(+)
diff --git a/tests/configlet/test_add_rack.py b/tests/configlet/test_add_rack.py
index dd83634e0cb..1a61774740c 100644
--- a/tests/configlet/test_add_rack.py
+++ b/tests/configlet/test_add_rack.py
@@ -2,6 +2,7 @@
import pytest
import sys
+from tests.common.utilities import skip_release
sys.path.append("./configlet/util")
@@ -12,6 +13,20 @@
pytest.mark.topology("t1")
]
+
+@pytest.fixture(scope="module", autouse=True)
+def check_image_version(duthost):
+ """Skips this test if the SONiC image installed on DUT is older than 202111
+
+ Args:
+ duthost: DUT host object.
+
+ Returns:
+ None.
+ """
+ skip_release(duthost, ["201811", "201911", "202012", "202106", "202111"])
+
+
@pytest.fixture(scope="module")
def configure_dut(duthosts, rand_one_dut_hostname):
try:
diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh
index 0d8128a2eeb..2b07e7c48e6 100755
--- a/tests/kvmtest.sh
+++ b/tests/kvmtest.sh
@@ -230,6 +230,7 @@ test_t1_lag() {
bgp/test_bgp_update_timer.py \
bgp/test_bgpmon.py \
bgp/test_traffic_shift.py \
+ configlet/test_add_rack.py \
container_checker/test_container_checker.py \
http/test_http_copy.py \
ipfwd/test_mtu.py \
From 7063b36b6fcca8ab8c9eadb1fc8a0cfbb9b910c1 Mon Sep 17 00:00:00 2001
From: "microsoft-github-policy-service[bot]"
<77245923+microsoft-github-policy-service[bot]@users.noreply.github.com>
Date: Tue, 31 May 2022 08:29:59 +0800
Subject: [PATCH 17/23] Microsoft mandatory file SECURITY.MD (#5682)
Co-authored-by: microsoft-github-policy-service[bot] <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com>
---
SECURITY.md | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
create mode 100644 SECURITY.md
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000000..869fdfe2b24
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
From a923c95803167d5fb1551318a33333d52da23226 Mon Sep 17 00:00:00 2001
From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com>
Date: Tue, 31 May 2022 09:13:28 +0800
Subject: [PATCH 18/23] add backup kusto for test result data (#5729)
What is the motivation for this PR?
Support Kusto cluster high availability (HA) for SONiC test data.
How did you do it?
Add a backup Kusto cluster for test data upload.
How did you verify/test it?
Verified by manually running the pipeline and checking the results in Kusto.
---
test_reporting/report_data_storage.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/test_reporting/report_data_storage.py b/test_reporting/report_data_storage.py
index 5948146529c..cb4ca1d1763 100644
--- a/test_reporting/report_data_storage.py
+++ b/test_reporting/report_data_storage.py
@@ -140,6 +140,25 @@ def __init__(self, db_name: str):
tenant_id)
self._ingestion_client = KustoIngestClient(kcsb)
+ """
+ Kusto performance depends on the workload of the cluster. To improve the availability of the test result
+ data service, an optional backup cluster can be hosted.
+ """
+ ingest_cluster = os.getenv("TEST_REPORT_INGEST_KUSTO_CLUSTER_BACKUP")
+ tenant_id = os.getenv("TEST_REPORT_AAD_TENANT_ID_BACKUP")
+ service_id = os.getenv("TEST_REPORT_AAD_CLIENT_ID_BACKUP")
+ service_key = os.getenv("TEST_REPORT_AAD_CLIENT_KEY_BACKUP")
+
+ if not ingest_cluster or not tenant_id or not service_id or not service_key:
+ print("Could not load backup Kusto Credentials from environment")
+ self._ingestion_client_backup = None
+ else:
+ kcsb = KustoConnectionStringBuilder.with_aad_application_key_authentication(ingest_cluster,
+ service_id,
+ service_key,
+ tenant_id)
+ self._ingestion_client_backup = KustoIngestClient(kcsb)
+
def upload_report(self, report_json: Dict, external_tracking_id: str = "", report_guid: str = "") -> None:
"""Upload a report to the back-end data store.
@@ -238,3 +257,5 @@ def _ingest_data(self, table, data):
temp.write(json.dumps(data))
temp.seek(0)
self._ingestion_client.ingest_from_file(temp.name, ingestion_properties=props)
+ if self._ingestion_client_backup:
+ self._ingestion_client_backup.ingest_from_file(temp.name, ingestion_properties=props)
From 19c13fff5ae909b8839dc14f1f8d1d9dd71cd07d Mon Sep 17 00:00:00 2001
From: Jing Zhang
Date: Tue, 31 May 2022 09:54:24 -0700
Subject: [PATCH 19/23] [python3] migrate `ip_in_ip_tunnel_test.py` to python3
(#5714)
### Description of PR
Summary:
Following instructions to migrate ptf test script `ip_in_ip_tunnel_test.py` to python3.
Sign-off: Jing Zhang zhangjing@microsoft.com
### Type of change
- [x] Test case(new/improvement)
### Approach
#### What is the motivation for this PR?
#### How did you do it?
1. Use `2to3` to update the `ip_in_ip_tunnel_test.py` script.
2. Add the `is_python3=True` argument to the ptf_runner call in dual_tor_utils.py.
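For reference, the automated rewrite can be done in place with something like `2to3 -w ip_in_ip_tunnel_test.py` (`-w` writes the changes back to the file); in this script the visible result is mainly the removal of the Python 2-only `unicode()` calls, as in the hunk below.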
#### How did you verify/test it?
1. dualtor/test_orchagent_standby_tor_downstream.py tests
```
dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_loopback_route_readded[ipv4] PASSED
dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_loopback_route_readded[ipv6] PASSED
```
---
.../test/files/ptftests/{ => py3}/ip_in_ip_tunnel_test.py | 4 ++--
tests/common/dualtor/dual_tor_utils.py | 3 ++-
2 files changed, 4 insertions(+), 3 deletions(-)
rename ansible/roles/test/files/ptftests/{ => py3}/ip_in_ip_tunnel_test.py (98%)
diff --git a/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py b/ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
similarity index 98%
rename from ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
rename to ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
index 94c0a04a268..ebde26d4422 100644
--- a/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
+++ b/ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
@@ -24,8 +24,8 @@
PACKET_NUM_FOR_NEGATIVE_CHECK = 100
DIFF = 0.25 # The valid range for balance check
-SRC_IP_RANGE = [unicode('8.0.0.0'), unicode('8.255.255.255')]
-SRC_IPV6_RANGE = [unicode('20D0:A800:0:00::'), unicode('20D0:FFFF:0:00::FFFF')]
+SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255']
+SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF']
TIMEOUT = 1
class IpinIPTunnelTest(BaseTest):
diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py
index 404e3674bef..05ef16e398b 100644
--- a/tests/common/dualtor/dual_tor_utils.py
+++ b/tests/common/dualtor/dual_tor_utils.py
@@ -748,7 +748,8 @@ def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, stan
params=params,
log_file=log_file,
qlen=2000,
- socket_recv_size=16384)
+ socket_recv_size=16384,
+ is_python3=True)
def generate_hashed_packet_to_server(ptfadapter, duthost, hash_key, target_server_ip, count=1):
From b5580bc4316f4387708accb7f6f05492ba83ff42 Mon Sep 17 00:00:00 2001
From: Vaibhav Hemant Dixit
Date: Tue, 31 May 2022 13:20:42 -0700
Subject: [PATCH 20/23] Update regex for FDB learning to work on both 201911
and 202012 (#5745)
Fix issue: warmboot tests are failing on 201911 images because the regex below was written for 202012 images only.
On 202012:
str2-msn4600c-acs-03 NOTICE swss#orchagent: :- doAppSwitchTableTask: Set switch attribute fdb_aging_time to 600
On 201911:
str-msn2700-22 NOTICE swss#orchagent: :- doTask: Set switch attribute fdb_aging_time to 600
Changes:
Updated the regex to work on both 201911 and 202012 images.
Changed doAppSwitchTableTask to do.*Task.
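A quick sanity check against the two sample log lines above shows the relaxed pattern matching both images:
```
# Verify the relaxed regex matches both the 202012 and 201911 log formats.
import re

pattern = re.compile(r'.*NOTICE swss#orchagent.*do.*Task: Set switch attribute fdb_aging_time to 600')

log_202012 = "str2-msn4600c-acs-03 NOTICE swss#orchagent: :- doAppSwitchTableTask: Set switch attribute fdb_aging_time to 600"
log_201911 = "str-msn2700-22 NOTICE swss#orchagent: :- doTask: Set switch attribute fdb_aging_time to 600"

assert pattern.search(log_202012)  # 202012: doAppSwitchTableTask
assert pattern.search(log_201911)  # 201911: doTask
```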
---
tests/platform_tests/reboot_timing_constants.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/platform_tests/reboot_timing_constants.py b/tests/platform_tests/reboot_timing_constants.py
index 409c3cef2ea..956890aa67f 100644
--- a/tests/platform_tests/reboot_timing_constants.py
+++ b/tests/platform_tests/reboot_timing_constants.py
@@ -24,7 +24,7 @@
"ROUTE_DEFERRAL_TIMER|Start": re.compile(r'.*ADJCHANGE: neighbor .* in vrf default Up.*'),
"ROUTE_DEFERRAL_TIMER|End": re.compile(r'.*rcvd End-of-RIB for IPv4 Unicast from.*'),
"FDB_AGING_DISABLE|Start": re.compile(r'.*NOTICE swss#orchagent.*setAgingFDB: Set switch.*fdb_aging_time 0 sec'),
- "FDB_AGING_DISABLE|End": re.compile(r'.*NOTICE swss#orchagent.*doAppSwitchTableTask: Set switch attribute fdb_aging_time to 600')
+ "FDB_AGING_DISABLE|End": re.compile(r'.*NOTICE swss#orchagent.*do.*Task: Set switch attribute fdb_aging_time to 600')
},
"LATEST": {
"INIT_VIEW|Start": re.compile(r'.*swss#orchagent.*notifySyncd.*sending syncd.*INIT_VIEW.*'),
From 0ed062de8fd7dd1769004d00510776397e4c0b31 Mon Sep 17 00:00:00 2001
From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com>
Date: Wed, 1 Jun 2022 07:33:21 +0800
Subject: [PATCH 21/23] [nic_simulator] Use port `50075` (#5744)
Approach
What is the motivation for this PR?
Use port 50075 for kvm dualtor-mixed testbed nic_simulator.
Signed-off-by: Longxiang Lyu lolv@microsoft.com
How did you do it?
How did you verify/test it?
---
ansible/group_vars/all/nic_simulator_grpc_port_map.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ansible/group_vars/all/nic_simulator_grpc_port_map.yml b/ansible/group_vars/all/nic_simulator_grpc_port_map.yml
index faf83ef1891..c82d6cec0a6 100644
--- a/ansible/group_vars/all/nic_simulator_grpc_port_map.yml
+++ b/ansible/group_vars/all/nic_simulator_grpc_port_map.yml
@@ -2,4 +2,4 @@
# mapping from testbed name to nic simulator gRPC binding port
nic_simulator_grpc_port:
- vms-kvm-dual-mixed: 8900
+ vms-kvm-dual-mixed: 50075
From db8dde2e02a668a7074823a5791c8ab73a3aa67c Mon Sep 17 00:00:00 2001
From: Ze Gan
Date: Tue, 31 May 2022 16:58:26 -0700
Subject: [PATCH 22/23] Update run-test-template.yml (#5737)
What is the motivation for this PR?
If the sonic-buildimage build partially fails, sonic-mgmt cannot fetch the latest image for testing.
How did you do it?
Update the AzP configuration to allow partially succeeded builds.
---
.azure-pipelines/run-test-template.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.azure-pipelines/run-test-template.yml b/.azure-pipelines/run-test-template.yml
index 6dfb174fb5d..acc053a5a30 100644
--- a/.azure-pipelines/run-test-template.yml
+++ b/.azure-pipelines/run-test-template.yml
@@ -31,6 +31,7 @@ steps:
artifact: sonic-buildimage.vs
runVersion: 'latestFromBranch'
runBranch: 'refs/heads/master'
+ allowPartiallySucceededBuilds: true
displayName: "Download sonic kvm image"
- script: |
From 6798d2bd3e91071ca7ff92316ae6833e79803a84 Mon Sep 17 00:00:00 2001
From: Andrii-Yosafat Lozovyi
Date: Wed, 1 Jun 2022 03:28:38 +0300
Subject: [PATCH 23/23] [Vrf] Added extra check for TestVrfCapacity (#5328)
Summary: When executing test cases under TestVrfCapacity, the setup configuration may still be applying on the DUT after the test cases have already started, which leads to failures of test_ping or test_ip_fwd. It was observed that the addition of around 2000 VLAN members may still be in progress when a test case starts. To make sure this setup step has finished, a check for VLAN members was added.
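Reading the diff below, the check counts `VLAN_MEMBER_TABLE|*` keys for the two test ports in redis database 6 and is retried through `wait_until(220, 10, 0, ...)` — presumably a 220-second timeout polled every 10 seconds with no initial delay — so the test proceeds as soon as the expected member count is reached.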
Approach
What is the motivation for this PR?
Make TestVrfCapacity test cases more stable
How did you do it?
How did you verify/test it?
Run TestVrfCapacity on t0 topology
Any platform specific information?
SONiC Software Version: SONiC.master.79661-dirty-20220309.185027
Distribution: Debian 11.2
Kernel: 5.10.0-8-2-amd64
Build commit: 3fa18d18d
Build date: Wed Mar 9 18:58:08 UTC 2022
Built by: AzDevOps@sonic-build-workers-0018RF
Platform: x86_64-accton_as9516_32d-r0
HwSKU: newport
Signed-off-by: Andrii-Yosafat Lozovyi
---
tests/vrf/test_vrf.py | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/tests/vrf/test_vrf.py b/tests/vrf/test_vrf.py
index 467e2a16a32..9a7932f1e1c 100644
--- a/tests/vrf/test_vrf.py
+++ b/tests/vrf/test_vrf.py
@@ -21,6 +21,7 @@
from tests.ptf_runner import ptf_runner
from tests.common.utilities import wait_until
from tests.common.reboot import reboot
+from tests.common.helpers.assertions import pytest_assert
"""
During vrf testing, a vrf basic configuration need to be setup before any tests,
@@ -383,6 +384,16 @@ def get_dut_vlan_ptf_ports(mg_facts):
ports.add(mg_facts['minigraph_port_indices'][member])
return ports
+def check_vlan_members(duthost, member1, member2, exp_count):
+ out1 = duthost.shell("redis-cli -n 6 keys 'VLAN_MEMBER_TABLE|*|{}' | wc -l".format(member1))['stdout']
+ out2 = duthost.shell("redis-cli -n 6 keys 'VLAN_MEMBER_TABLE|*|{}' | wc -l".format(member2))['stdout']
+ added = int(out1) + int(out2)
+ if added >= exp_count * 2:
+ logger.info('All vlan members added')
+ return True
+ logger.info('Not all vlan members added yet: {} of expected {}'.format(added, exp_count * 2))
+ return False
+
# fixtures
@pytest.fixture(scope="module")
@@ -1217,7 +1228,11 @@ def setup_vrf_capacity(self, duthosts, rand_one_dut_hostname, ptfhost, localhost
duthost.template(src=src_template, dest=render_file)
duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
- time.sleep(attrs['add_sleep_time'])
+ if cfg_name == 'vlan_member':
+ pytest_assert(wait_until(220, 10, 0, check_vlan_members, duthost, dut_port1, dut_port2, vrf_count),
+ "Not all vlan members were added by the end of timeout")
+ else:
+ time.sleep(attrs['add_sleep_time'])
# setup static routes
duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")