diff --git a/config/main.py b/config/main.py
index 6588bc1ef3..4f3ce4b6fc 100644
--- a/config/main.py
+++ b/config/main.py
@@ -3906,72 +3906,117 @@ def remove(ctx, interface_name, ip_addr):
 #
 # buffer commands and utilities
 #
-def pgmaps_check_legality(ctx, interface_name, input_pg, is_new_pg):
+def buffer_objects_map_check_legality(ctx, db, interface_name, input_map, is_new_id, is_pg):
     """
-    Tool function to check whether input_pg is legal.
-    Three checking performed:
-    1. Whether the input_pg is legal: pgs are in range [0-7]
-    2. Whether the input_pg overlaps an existing pg in the port
+    Tool function to check whether input_map is legal.
+    Two checks are performed:
+    1. Whether the input_map is legal: IDs are within [0, maximum ID], with the maximum fetched from STATE_DB
+    2. Whether the input_map overlaps an existing priority group or queue on the port
     """
-    config_db = ctx.obj["config_db"]
+    def _parse_object_id(idsmap):
+        """
+        Tool function to parse the idsmap
+        Args:
+            idsmap: string containing object IDs map, like 3-4 or 7
+        Return:
+            The lower and upper bound. In case the idsmap is illegal, it returns None, None
+        Example:
+            3-4 => 3, 4
+            7 => 7, 7
+            3- => None, None
+        """
+        try:
+            match = re.search("^([0-9]+)(-[0-9]+)?$", idsmap)
+            lower = int(match.group(1))
+            if match.group(2):
+                upper = int(match.group(2)[1:])
+            else:
+                upper = lower
+        except Exception:
+            lower, upper = None, None
+
+        return lower, upper
+
+    config_db = db.cfgdb
+    object_name = "priority group" if is_pg else "queue"
 
     try:
-        lower = int(input_pg[0])
-        upper = int(input_pg[-1])
+        # Fetch the maximum object ID from STATE_DB
+        state_db = db.db
+        field_name = 'max_priority_groups' if is_pg else 'max_queues'
 
-        if upper < lower or lower < 0 or upper > 7:
-            ctx.fail("PG {} is not valid.".format(input_pg))
+        _hash = 'BUFFER_MAX_PARAM_TABLE|{}'.format(interface_name)
+        buffer_max_params = state_db.get_all(state_db.STATE_DB, _hash)
+        maximum_id = int(buffer_max_params.get(field_name)) - 1
     except Exception:
-        ctx.fail("PG {} is not valid.".format(input_pg))
+        ctx.fail("Unable to fetch {} from {} in STATE_DB".format(field_name, _hash))
+
+    lower, upper = _parse_object_id(input_map)
+    # Compare against None explicitly so that ID 0 is still accepted
+    if lower is None or upper is None or upper < lower or lower < 0 or upper > maximum_id:
+        ctx.fail("Buffer {} {} is not valid.".format(object_name, input_map))
 
     # Check overlapping.
     # To configure a new PG which overlaps an existing one is not allowed
     # For example, to add '5-6' while '3-5' exists is illegal
-    existing_pgs = config_db.get_table("BUFFER_PG")
-    if not is_new_pg:
-        if not (interface_name, input_pg) in existing_pgs.keys():
-            ctx.fail("PG {} doesn't exist".format(input_pg))
+    existing_object_maps = config_db.get_table("BUFFER_PG" if is_pg else "BUFFER_QUEUE")
+    if not is_new_id:
+        if not (interface_name, input_map) in existing_object_maps.keys():
+            ctx.fail("Buffer {} {} doesn't exist".format(object_name, input_map))
         return
 
-    for k, v in existing_pgs.items():
-        port, existing_pg = k
+    for k, v in existing_object_maps.items():
+        port, existing_object_map = k
         if port == interface_name:
-            existing_lower = int(existing_pg[0])
-            existing_upper = int(existing_pg[-1])
+            existing_lower, existing_upper = _parse_object_id(existing_object_map)
             if existing_upper < lower or existing_lower > upper:
                 # new and existing pgs disjoint, legal
                 pass
             else:
-                ctx.fail("PG {} overlaps with existing PG {}".format(input_pg, existing_pg))
+                ctx.fail("Buffer {} {} overlaps with existing {} {}".format(object_name, input_map, object_name, existing_object_map))
 
 
-def update_pg(ctx, interface_name, pg_map, override_profile, add = True):
-    config_db = ctx.obj["config_db"]
+def update_buffer_object(db, interface_name, object_map, override_profile, is_pg, add=True):
+    config_db = db.cfgdb
+    ctx = click.get_current_context()
 
     # Check whether port is legal
     ports = config_db.get_entry("PORT", interface_name)
     if not ports:
         ctx.fail("Port {} doesn't exist".format(interface_name))
 
-    # Check whether pg_map is legal
+    buffer_table = "BUFFER_PG" if is_pg else "BUFFER_QUEUE"
+
+    # Check whether object_map is legal
     # Check whether there are other lossless profiles configured on the interface
-    pgmaps_check_legality(ctx, interface_name, pg_map, add)
+    buffer_objects_map_check_legality(ctx, db, interface_name, object_map, add, is_pg)
 
     # All checks passed
     if override_profile:
         profile_dict = config_db.get_entry("BUFFER_PROFILE", override_profile)
         if not profile_dict:
             ctx.fail("Profile {} doesn't exist".format(override_profile))
-        if not 'xoff' in profile_dict.keys() and 'size' in profile_dict.keys():
-            ctx.fail("Profile {} doesn't exist or isn't a lossless profile".format(override_profile))
-        config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": override_profile})
+        pool_name = profile_dict.get("pool")
+        if not pool_name:
+            ctx.fail("Profile {} is invalid".format(override_profile))
+        pool_dict = config_db.get_entry("BUFFER_POOL", pool_name)
+        pool_dir = pool_dict.get("type")
+        expected_dir = "ingress" if is_pg else "egress"
+        if pool_dir != expected_dir:
+            ctx.fail("Type of pool {} referenced by profile {} is wrong".format(pool_name, override_profile))
+        if is_pg:
+            if not 'xoff' in profile_dict.keys() and 'size' in profile_dict.keys():
+                ctx.fail("Profile {} doesn't exist or isn't a lossless profile".format(override_profile))
+        config_db.set_entry(buffer_table, (interface_name, object_map), {"profile": override_profile})
     else:
-        config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": "NULL"})
-        adjust_pfc_enable(ctx, interface_name, pg_map, True)
+        config_db.set_entry(buffer_table, (interface_name, object_map), {"profile": "NULL"})
+        if is_pg:
+            adjust_pfc_enable(ctx, db, interface_name, object_map, True)
 
-def remove_pg_on_port(ctx, interface_name, pg_map):
-    config_db = ctx.obj["config_db"]
+
+def remove_buffer_object_on_port(db, interface_name, buffer_object_map, is_pg=True):
+    config_db = db.cfgdb
+    ctx = click.get_current_context()
 
     # Check whether port is legal
     ports = config_db.get_entry("PORT", interface_name)
@@ -3979,30 +4024,32 @@ def remove_pg_on_port(ctx, interface_name, pg_map):
         ctx.fail("Port {} doesn't exist".format(interface_name))
 
     # Remove all dynamic lossless PGs on the port
-    existing_pgs = config_db.get_table("BUFFER_PG")
+    buffer_table = "BUFFER_PG" if is_pg else "BUFFER_QUEUE"
+    existing_buffer_objects = config_db.get_table(buffer_table)
     removed = False
-    for k, v in existing_pgs.items():
-        port, existing_pg = k
-        if port == interface_name and (not pg_map or pg_map == existing_pg):
-            need_to_remove = False
+    for k, v in existing_buffer_objects.items():
+        port, existing_buffer_object = k
+        if port == interface_name and (not buffer_object_map or buffer_object_map == existing_buffer_object):
             referenced_profile = v.get('profile')
             if referenced_profile and referenced_profile == 'ingress_lossy_profile':
-                if pg_map:
-                    ctx.fail("Lossy PG {} can't be removed".format(pg_map))
+                if buffer_object_map:
+                    ctx.fail("Lossy PG {} can't be removed".format(buffer_object_map))
                 else:
                     continue
-            config_db.set_entry("BUFFER_PG", (interface_name, existing_pg), None)
-            adjust_pfc_enable(ctx, interface_name, pg_map, False)
+            config_db.set_entry(buffer_table, (interface_name, existing_buffer_object), None)
+            if is_pg:
+                adjust_pfc_enable(ctx, db, interface_name, buffer_object_map, False)
             removed = True
     if not removed:
-        if pg_map:
-            ctx.fail("No specified PG {} found on port {}".format(pg_map, interface_name))
+        object_name = "lossless priority group" if is_pg else "queue"
+        if buffer_object_map:
+            ctx.fail("No specified {} {} found on port {}".format(object_name, buffer_object_map, interface_name))
         else:
-            ctx.fail("No lossless PG found on port {}".format(interface_name))
+            ctx.fail("No {} found on port {}".format(object_name, interface_name))
 
-def adjust_pfc_enable(ctx, interface_name, pg_map, add):
-    config_db = ctx.obj["config_db"]
+def adjust_pfc_enable(ctx, db, interface_name, pg_map, add):
+    config_db = db.cfgdb
 
     # Fetch the original pfc_enable
     qosmap = config_db.get_entry("PORT_QOS_MAP", interface_name)
@@ -4077,10 +4124,10 @@ def lossless(ctx):
 @click.argument('interface_name', metavar='<interface_name>', required=True)
 @click.argument('pg_map', metavar='<pg_map>', required=True)
 @click.argument('override_profile', metavar='<override_profile>', required=False)
-@click.pass_context
-def add_pg(ctx, interface_name, pg_map, override_profile):
+@clicommon.pass_db
+def add_pg(db, interface_name, pg_map, override_profile):
     """Set lossless PGs for the interface"""
-    update_pg(ctx, interface_name, pg_map, override_profile)
+    update_buffer_object(db, interface_name, pg_map, override_profile, True)
 
 #
@@ -4090,10 +4137,10 @@ def add_pg(ctx, interface_name, pg_map, override_profile):
 @click.argument('interface_name', metavar='<interface_name>', required=True)
 @click.argument('pg_map', metavar='<pg_map>', required=True)
 @click.argument('override_profile', metavar='<override_profile>', required=False)
-@click.pass_context
-def set_pg(ctx, interface_name, pg_map, override_profile):
+@clicommon.pass_db
+def set_pg(db, interface_name, pg_map, override_profile):
     """Set lossless PGs for the interface"""
-    update_pg(ctx, interface_name, pg_map, override_profile, False)
+    update_buffer_object(db, interface_name, pg_map, override_profile, True, False)
 
 #
@@ -4102,10 +4149,58 @@ def set_pg(ctx, interface_name, pg_map, override_profile):
 @lossless.command('remove')
 @click.argument('interface_name', metavar='<interface_name>', required=True)
 @click.argument('pg_map',
metavar='
+  config interface buffer queue set <interface> <queue map> <profile>
+  config interface buffer queue remove <interface> <queue map>
+  ```
+
+  The `<queue map>` represents the map of queues. It can be in one of the following two forms:
+
+  - For a range of queues, the lower bound and upper bound connected by a dash, like `3-4`
+  - For a single queue, the number, like `6`
+
+  The subcommand `add` is designed for adding a buffer profile to a group of queues. The new queue range must be disjoint from all queue ranges that already have a buffer profile configured.
+
+  For example, if a buffer profile is currently configured on queues 3-4 of port Ethernet4, configuring a buffer profile on queues 4-5 will fail because the range is not disjoint from 3-4, while configuring it on queues 5-6 will succeed.
+
+  The `profile` parameter represents a predefined egress buffer profile to be configured on the queues.
+
+  The subcommand `set` is designed for modifying the buffer profile of an existing group of queues.
+
+  The subcommand `remove` is designed for removing the buffer profile from an existing group of queues.
+
+- Example:
+
+  To configure buffer profiles for queues on a port:
+
+  ```
+  admin@sonic:~$ sudo config interface buffer queue add Ethernet0 3-4 egress_lossless_profile
+  ```
+
+  To change the profile used for queues on a port:
+
+  ```
+  admin@sonic:~$ sudo config interface buffer queue set Ethernet0 3-4 new-profile
+  ```
+
+  To remove a group of queues from a port:
+
+  ```
+  admin@sonic:~$ sudo config interface buffer queue remove Ethernet0 3-4
+  ```
+
+Go Back To [Beginning of the document](#) or [Beginning of this section](#dynamic-buffer-management)
+
 ### Show commands
 
 **show buffer information**
 
diff --git a/tests/buffer_input/buffer_test_vectors.py b/tests/buffer_input/buffer_test_vectors.py
index eebbc5bc59..b94d428a39 100644
--- a/tests/buffer_input/buffer_test_vectors.py
+++ b/tests/buffer_input/buffer_test_vectors.py
@@ -53,6 +53,20 @@
 headroom_type  dynamic
 -------------  ---------------------
 
+Profile: egress_lossless_profile
+----------  --------------------
+dynamic_th  0
+pool        egress_lossless_pool
+size        0
+----------  --------------------
+
+Profile: egress_lossy_profile
+----------  -----------------
+dynamic_th  0
+pool        egress_lossy_pool
+size        0
+----------  -----------------
+
 """
 
 show_buffer_information_output="""\
diff --git a/tests/buffer_test.py b/tests/buffer_test.py
index c9580d8750..b9404fede4 100644
--- a/tests/buffer_test.py
+++ b/tests/buffer_test.py
@@ -1,5 +1,8 @@
 import os
 import sys
+import pytest
+import mock
+from importlib import reload
 from click.testing import CliRunner
 from unittest import TestCase
 from swsscommon.swsscommon import ConfigDBConnector
@@ -269,3 +272,273 @@ def teardown_class(cls):
         os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
         os.environ['UTILITIES_UNIT_TESTING'] = "0"
         print("TEARDOWN")
+
+
+class TestInterfaceBuffer(object):
+    @pytest.fixture(scope="class", autouse=True)
+    def setup_class(cls):
+        print("SETUP")
+        os.environ['UTILITIES_UNIT_TESTING'] = "1"
+        import config.main as config
+        reload(config)
+        yield
+        print("TEARDOWN")
+        os.environ["UTILITIES_UNIT_TESTING"] = "0"
+        from .mock_tables import dbconnector
+        dbconnector.dedicated_dbs = {}
+
+    def test_config_int_buffer_pg_lossless_add(self, get_cmd_module):
+        (config, show) = get_cmd_module
+        runner = CliRunner()
+        db = Db()
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"].
+ commands["lossless"].commands["add"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert {'profile': 'NULL'} == db.cfgdb.get_entry('BUFFER_PG', 'Ethernet0|5') + assert {'pfc_enable': '3,4,5'} == db.cfgdb.get_entry('PORT_QOS_MAP', 'Ethernet0') + + # Try to add an existing entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 5 overlaps with existing priority group 5" in result.output + + # Try to add an overlap entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "2-3"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 2-3 overlaps with existing priority group 3-4" in result.output + + # Try to add an overlap entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "4-5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 4-5 overlaps with existing priority group 3-4" in result.output + + # Try to add a lossy profile + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "6", "ingress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Profile ingress_lossy_profile doesn't exist or isn't a lossless profile" in result.output + + # Try to add a large pg + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "8", "ingress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 8 is not valid" in result.output + + # Try to use a pg map in wrong format + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "3-", "testprofile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 3- is not valid" in result.output + + # Try to use a pg which is not a number + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. 
+ commands["lossless"].commands["add"], + ["Ethernet0", "a"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group a is not valid" in result.output + + # Try to use a non-exist profile + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["add"], + ["Ethernet0", "7", "testprofile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Profile testprofile doesn't exist" in result.output + + # Try to remove all lossless profiles + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + keys = db.cfgdb.get_keys('BUFFER_PG') + assert set(keys) == {('Ethernet0', '0'), ('Ethernet0', '3-4'), ('Ethernet0', '5')} + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["remove"], + ["Ethernet0"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + keys = db.cfgdb.get_keys('BUFFER_PG') + assert keys == [('Ethernet0', '0')] + assert {'pfc_enable': ''} == db.cfgdb.get_entry('PORT_QOS_MAP', 'Ethernet0') + + def test_config_int_buffer_pg_lossless_set(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Set a non-exist entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["set"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer priority group 5 doesn't exist" in result.output + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["set"], + ["Ethernet0", "3-4", "headroom_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert {'profile': 'headroom_profile'} == db.cfgdb.get_entry('BUFFER_PG', 'Ethernet0|3-4') + assert {'pfc_enable': '3,4'} == db.cfgdb.get_entry('PORT_QOS_MAP', 'Ethernet0') + + def test_config_int_buffer_pg_lossless_remove(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Remove non-exist entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["remove"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "No specified lossless priority group 5 found on port Ethernet0" in result.output + + # Remove lossy PG + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. 
+ commands["lossless"].commands["remove"], + ["Ethernet0", "0"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Lossy PG 0 can't be removed" in result.output + + # Remove existing lossless PG + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + # Remove one lossless PG + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["priority-group"]. + commands["lossless"].commands["remove"], + ["Ethernet0", "3-4"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert [('Ethernet0', '0')] == db.cfgdb.get_keys('BUFFER_PG') + assert {'pfc_enable': ''} == db.cfgdb.get_entry('PORT_QOS_MAP', 'Ethernet0') + + # Remove all lossless PGs is tested in the 'add' test case to avoid repeating adding PGs + + def test_config_int_buffer_queue_add(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Not providing a profile + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["add"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Missing argument" in result.output + + # Add existing + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["add"], + ["Ethernet0", "3-4", "egress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer queue 3-4 overlaps with existing queue 3-4" in result.output + + # Normal add + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["add"], + ["Ethernet0", "5", "egress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + queue = db.cfgdb.get_entry('BUFFER_QUEUE', 'Ethernet0|5') + assert queue == {'profile': 'egress_lossy_profile'} + + # Large queue ID + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["add"], + ["Ethernet0", "20", "egress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Buffer queue 20 is not valid" in result.output + + # Remove all + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + keys = db.cfgdb.get_keys('BUFFER_QUEUE') + assert set(keys) == {('Ethernet0', '3-4'), ('Ethernet0', '5')} + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["remove"], + ["Ethernet0"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert [] == db.cfgdb.get_keys('BUFFER_QUEUE') + + def test_config_int_buffer_queue_set(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Remove non-exist entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["set"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Missing argument" in result.output + + with 
mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["set"], + ["Ethernet0", "3-4", "headroom_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "Type of pool ingress_lossless_pool referenced by profile headroom_profile is wrong" in result.output + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["set"], + ["Ethernet0", "3-4", "egress_lossy_profile"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + queue = db.cfgdb.get_entry('BUFFER_QUEUE', 'Ethernet0|3-4') + assert queue == {'profile': 'egress_lossy_profile'} + + def test_config_int_buffer_queue_remove(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Remove non-exist entry + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["remove"], + ["Ethernet0", "5"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code + assert "No specified queue 5 found on port Ethernet0" in result.output + + # Remove existing queue + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["buffer"].commands["queue"].commands["remove"], + ["Ethernet0", "3-4"], obj=db) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert [] == db.cfgdb.get_keys('BUFFER_QUEUE') + + # Removing all queues is tested in "add" test case to avoid repeating adding queues. diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 59bd5880c6..5c26ad4f73 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1767,12 +1767,25 @@ "pool": "ingress_lossless_pool", "headroom_type": "dynamic" }, + "BUFFER_PROFILE|egress_lossless_profile": { + "dynamic_th": "0", + "pool": "egress_lossless_pool", + "size": "0" + }, + "BUFFER_PROFILE|egress_lossy_profile": { + "dynamic_th": "0", + "pool": "egress_lossy_pool", + "size": "0" + }, "BUFFER_PG|Ethernet0|3-4": { "profile": "NULL" }, "BUFFER_PG|Ethernet0|0": { "profile": "ingress_lossy_profile" }, + "BUFFER_QUEUE|Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, "PORT_QOS_MAP|Ethernet0": { "pfc_enable": "3,4" }, diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 9e7501c320..ced65dfc6d 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -626,6 +626,10 @@ "BUFFER_MAX_PARAM_TABLE|global": { "mmu_size": "13945824" }, + "BUFFER_MAX_PARAM_TABLE|Ethernet0": { + "max_queues": "20", + "max_priority_groups": "8" + }, "CHASSIS_MIDPLANE_TABLE|SUPERVISOR0": { "ip_address": "192.168.1.100", "access": "True"
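
For reference, a minimal standalone sketch of the map-parsing and disjointness rule that `buffer_objects_map_check_legality` implements and the documentation above describes. The helper names `parse_object_map` and `overlaps` are illustrative only and are not part of the patch:

```python
import re
from typing import Optional, Tuple


def parse_object_map(ids_map: str) -> Tuple[Optional[int], Optional[int]]:
    """Parse a PG/queue map such as '3-4' or '7' into (lower, upper); (None, None) if malformed."""
    match = re.search(r"^([0-9]+)(-[0-9]+)?$", ids_map)
    if not match:
        return None, None
    lower = int(match.group(1))
    upper = int(match.group(2)[1:]) if match.group(2) else lower
    return lower, upper


def overlaps(new_map: str, existing_map: str) -> bool:
    """Return True when two maps share at least one ID, which is the case the CLI rejects."""
    new_lo, new_hi = parse_object_map(new_map)
    ex_lo, ex_hi = parse_object_map(existing_map)
    if None in (new_lo, new_hi, ex_lo, ex_hi):
        return False
    return not (ex_hi < new_lo or ex_lo > new_hi)


# Mirrors the documented behaviour: 4-5 clashes with an existing 3-4, while 5-6 does not.
assert parse_object_map("3-4") == (3, 4)
assert parse_object_map("3-") == (None, None)
assert overlaps("4-5", "3-4")
assert not overlaps("5-6", "3-4")
```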