From 1c094d302fa86924a3c72dea78fc599813f4eba0 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Wed, 21 Feb 2024 03:51:30 +0800 Subject: [PATCH 01/45] [show] Update show run all to cover all asic config in multiasic (#3148) * [show] Update show run all to cover all asic config in masic * per comment --- show/main.py | 51 ++++++++++++++++++++++++++-------------- tests/show_test.py | 58 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 90 insertions(+), 19 deletions(-) diff --git a/show/main.py b/show/main.py index 725556e6e86..c1995ad27df 100755 --- a/show/main.py +++ b/show/main.py @@ -142,6 +142,24 @@ def get_cmd_output(cmd): proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) return proc.communicate()[0], proc.returncode +def get_config_json_by_namespace(namespace): + cmd = ['sonic-cfggen', '-d', '--print-data'] + if namespace is not None and namespace != multi_asic.DEFAULT_NAMESPACE: + cmd += ['-n', namespace] + + stdout, rc = get_cmd_output(cmd) + if rc: + click.echo("Failed to get cmd output '{}':rc {}".format(cmd, rc)) + raise click.Abort() + + try: + config_json = json.loads(stdout) + except JSONDecodeError as e: + click.echo("Failed to load output '{}':{}".format(cmd, e)) + raise click.Abort() + + return config_json + # Lazy global class instance for SONiC interface name to alias conversion iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) @@ -1407,25 +1425,24 @@ def runningconfiguration(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Show full running configuration""" - cmd = ['sonic-cfggen', '-d', '--print-data'] - stdout, rc = get_cmd_output(cmd) - if rc: - click.echo("Failed to get cmd output '{}':rc {}".format(cmd, rc)) - raise click.Abort() + output = {} + bgpraw_cmd = "show running-config" - try: - output = json.loads(stdout) - except JSONDecodeError as e: - click.echo("Failed to load output '{}':{}".format(cmd, e)) - raise click.Abort() 
+ import utilities_common.bgp_util as bgp_util + # In multiaisc, the namespace is changed to 'localhost' by design + host_config = get_config_json_by_namespace(multi_asic.DEFAULT_NAMESPACE) + output['localhost'] = host_config - if not multi_asic.is_multi_asic(): - bgpraw_cmd = [constants.RVTYSH_COMMAND, '-c', 'show running-config'] - bgpraw, rc = get_cmd_output(bgpraw_cmd) - if rc: - bgpraw = "" - output['bgpraw'] = bgpraw - click.echo(json.dumps(output, indent=4)) + if multi_asic.is_multi_asic(): + ns_list = multi_asic.get_namespace_list() + for ns in ns_list: + ns_config = get_config_json_by_namespace(ns) + ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) + output[ns] = ns_config + click.echo(json.dumps(output, indent=4)) + else: + host_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd) + click.echo(json.dumps(output['localhost'], indent=4)) # 'acl' subcommand ("show runningconfiguration acl") diff --git a/tests/show_test.py b/tests/show_test.py index 5b55c15896b..077005b2201 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -2,8 +2,10 @@ import sys import click import pytest +import importlib import subprocess import show.main as show +import utilities_common.bgp_util as bgp_util from unittest import mock from click.testing import CliRunner from utilities_common import constants @@ -32,6 +34,12 @@ class TestShowRunAllCommands(object): def setup_class(cls): print("SETUP") os.environ["UTILITIES_UNIT_TESTING"] = "1" + cls._old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock.MagicMock( + return_value=cls.mock_run_bgp_command()) + + def mock_run_bgp_command(): + return "" def test_show_runningconfiguration_all_json_loads_failure(self): def get_cmd_output_side_effect(*args, **kwargs): @@ -55,16 +63,62 @@ def get_cmd_output_side_effect(*args, **kwargs): with mock.patch('show.main.get_cmd_output', mock.MagicMock(side_effect=get_cmd_output_side_effect)) as mock_get_cmd_output: result = 
CliRunner().invoke(show.cli.commands['runningconfiguration'].commands['all'], []) - assert mock_get_cmd_output.call_count == 2 + assert result.exit_code == 0 + assert mock_get_cmd_output.call_count == 1 + assert mock_get_cmd_output.call_args_list == [ + call(['sonic-cfggen', '-d', '--print-data'])] + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + bgp_util.run_bgp_command = cls._old_run_bgp_command + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + + +class TestShowRunAllCommandsMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + cls._old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock.MagicMock( + return_value=cls.mock_run_bgp_command()) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def mock_run_bgp_command(): + return "" + + def test_show_runningconfiguration_all_masic(self): + def get_cmd_output_side_effect(*args, **kwargs): + return "{}", 0 + with mock.patch('show.main.get_cmd_output', + mock.MagicMock(side_effect=get_cmd_output_side_effect)) as mock_get_cmd_output: + result = CliRunner().invoke(show.cli.commands['runningconfiguration'].commands['all'], []) + assert result.exit_code == 0 + assert mock_get_cmd_output.call_count == 3 assert mock_get_cmd_output.call_args_list == [ call(['sonic-cfggen', '-d', '--print-data']), - call(['rvtysh', '-c', 'show running-config'])] + call(['sonic-cfggen', '-d', '--print-data', '-n', 'asic0']), + call(['sonic-cfggen', '-d', '--print-data', '-n', 'asic1'])] @classmethod def teardown_class(cls): print("TEARDOWN") + bgp_util.run_bgp_command = cls._old_run_bgp_command os.environ["PATH"] = 
os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + @patch('show.main.run_command') @pytest.mark.parametrize( From 6e4130d8e2f95094f8f294206a0578b0babb354e Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Fri, 23 Feb 2024 05:57:53 +0800 Subject: [PATCH 02/45] [config] Check golden config exist early if flag is set (#3169) ### What I did Fix https://github.com/sonic-net/sonic-utilities/issues/3164 Check Golden Config earlier before service is down. #### How I did it Move the check at the begining #### How to verify it Unit test --- config/main.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/config/main.py b/config/main.py index b039c56929f..f2113f255fd 100644 --- a/config/main.py +++ b/config/main.py @@ -1707,6 +1707,15 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, argv_str = ' '.join(['config', *sys.argv[1:]]) log.log_notice(f"'load_minigraph' executing with command: {argv_str}") + # check if golden_config exists if override flag is set + if override_config: + if golden_config_path is None: + golden_config_path = DEFAULT_GOLDEN_CONFIG_DB_FILE + if not os.path.isfile(golden_config_path): + click.secho("Cannot find '{}'!".format(golden_config_path), + fg='magenta') + raise click.Abort() + #Stop services before config push if not no_service_restart: log.log_notice("'load_minigraph' stopping services...") @@ -1778,12 +1787,6 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, # Load golden_config_db.json if override_config: - if golden_config_path is None: - golden_config_path = DEFAULT_GOLDEN_CONFIG_DB_FILE - if not os.path.isfile(golden_config_path): - click.secho("Cannot find '{}'!".format(golden_config_path), - 
fg='magenta') - raise click.Abort() override_config_by(golden_config_path) # Invoke platform script if available before starting the services From 7670609a47e1419e5f3154866304f7578a02cd96 Mon Sep 17 00:00:00 2001 From: longhuan-cisco <84595962+longhuan-cisco@users.noreply.github.com> Date: Fri, 23 Feb 2024 11:14:30 -0800 Subject: [PATCH 03/45] Fix sfputil CLI failure for multi-asic platforms (#3168) * Fix sfputil invalid namesapce error * Add test case for loading port configuration * Improve cov --- sfputil/main.py | 2 ++ tests/sfputil_test.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/sfputil/main.py b/sfputil/main.py index eddc43f3dc0..937b13fa2d0 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -24,6 +24,7 @@ from utilities_common.sfp_helper import covert_application_advertisement_to_output_string from utilities_common.sfp_helper import QSFP_DATA_MAP from tabulate import tabulate +from utilities_common.general import load_db_config VERSION = '3.0' @@ -563,6 +564,7 @@ def load_sfputilhelper(): def load_port_config(): + load_db_config() try: if multi_asic.is_multi_asic(): # For multi ASIC platforms we pass DIR of port_config_file_path and the number of asics diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 63814f31c5a..e732140f833 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1412,3 +1412,15 @@ def test_target_firmware(self, mock_chassis): result = runner.invoke(sfputil.cli.commands['firmware'].commands['target'], ["Ethernet0", "1"]) assert result.output == 'Target Mode set failed!\n' assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.multi_asic.is_multi_asic') + @patch('sfputil.main.platform_sfputil', MagicMock()) + @patch('sfputil.main.device_info.get_paths_to_platform_and_hwsku_dirs', + MagicMock(return_value=(None, None))) + @patch('sfputil.main.device_info.get_path_to_port_config_file', MagicMock(return_value=(''))) + def test_load_port_config(self, mock_is_multi_asic): + 
mock_is_multi_asic.return_value = True + assert sfputil.load_port_config() == True + + mock_is_multi_asic.return_value = False + assert sfputil.load_port_config() == True From aefb61bdf8b83b61699b3e9a284869c9ee9e83a3 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:12:43 -0800 Subject: [PATCH 04/45] Modify transceiver PM CLI to handle N/A value for DOM threshold (#3174) Signed-off-by: Mihir Patel --- scripts/sfpshow | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 81add132964..1fd7479ba44 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -517,8 +517,10 @@ class SFPShow(object): for suffix in ZR_PM_THRESHOLD_KEY_SUFFIXS: key = self.convert_pm_prefix_to_threshold_prefix( prefix) + suffix - thresholds.append( - float(sfp_threshold_dict[key]) if key in sfp_threshold_dict else None) + if key in sfp_threshold_dict and sfp_threshold_dict[key] != 'N/A': + thresholds.append(float(sfp_threshold_dict[key])) + else: + thresholds.append(None) tca_high, tca_low = None, None if values[2] is not None and thresholds[0] is not None: From ba98c7f8460259de9ba0054ad135299324d1570e Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:15:45 -0800 Subject: [PATCH 05/45] CLI enhancements to revtrieve data from TRANSCEIVER_FIRMWARE_INFO table (#3177) * Retrieve firmware version fields from TRANSCEIVER_FIRMWARE_INFO table Signed-off-by: Mihir Patel * Fixed test failures * Removed update_firmware_info_to_state_db function * Revert "Removed update_firmware_info_to_state_db function" This reverts commit 68f52a2c3352bc709ab2e3ffe793761f7176a4f0. 
--------- Signed-off-by: Mihir Patel --- scripts/sfpshow | 7 +++++-- sfputil/main.py | 11 ++++++----- tests/mock_tables/asic1/state_db.json | 6 ++++-- tests/mock_tables/state_db.json | 12 ++++++++---- tests/sfputil_test.py | 6 +----- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 1fd7479ba44..85e8d8a1f10 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -305,7 +305,7 @@ class SFPShow(object): return output # Convert sfp info in DB to cli output string - def convert_sfp_info_to_output_string(self, sfp_info_dict): + def convert_sfp_info_to_output_string(self, sfp_info_dict, sfp_firmware_info_dict): indent = ' ' * 8 output = '' is_sfp_cmis = 'cmis_rev' in sfp_info_dict @@ -333,6 +333,8 @@ class SFPShow(object): output += '{}N/A\n'.format((indent * 2)) elif key == 'application_advertisement': output += covert_application_advertisement_to_output_string(indent, sfp_info_dict) + elif key == 'active_firmware' or key == 'inactive_firmware': + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_firmware_info_dict[key] if key in sfp_firmware_info_dict else 'N/A') else: output += '{}{}: {}\n'.format(indent, data_map[key], sfp_info_dict[key]) @@ -441,12 +443,13 @@ class SFPShow(object): output = '' sfp_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface_name)) + sfp_firmware_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(interface_name)) if sfp_info_dict: if sfp_info_dict['type'] == RJ45_PORT_TYPE: output = 'SFP EEPROM is not applicable for RJ45 port\n' else: output = 'SFP EEPROM detected\n' - sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict) + sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict, sfp_firmware_info_dict) output += sfp_info_output if dump_dom: diff --git a/sfputil/main.py b/sfputil/main.py index 937b13fa2d0..dccbae60bbf 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -85,8 +85,6 
@@ 'encoding': 'Encoding', 'connector': 'Connector', 'application_advertisement': 'Application Advertisement', - 'active_firmware': 'Active Firmware Version', - 'inactive_firmware': 'Inactive Firmware Version', 'hardware_rev': 'Hardware Revision', 'media_interface_code': 'Media Interface Code', 'host_electrical_interface': 'Host Electrical Interface', @@ -1316,9 +1314,12 @@ def update_firmware_info_to_state_db(port_name): state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) if state_db is not None: state_db.connect(state_db.STATE_DB) - active_firmware, inactive_firmware = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "active_firmware", active_firmware) - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() + if transceiver_firmware_info_dict is not None: + active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A') + inactive_firmware = transceiver_firmware_info_dict.get('inactive_firmware', 'N/A') + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware) + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) # 'firmware' subgroup @cli.group() diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json index 7397d25b8fc..195b8e87f37 100644 --- a/tests/mock_tables/asic1/state_db.json +++ b/tests/mock_tables/asic1/state_db.json @@ -29,8 +29,6 @@ "media_interface_technology" : "1550 nm DFB", "vendor_rev" : "XX", "cmis_rev" : "4.1", - "active_firmware" : "X.X", - "inactive_firmware" : "X.X", "supported_max_tx_power" : "4.0", "supported_min_tx_power" : "-22.9", "supported_max_laser_freq" 
: "196100", @@ -70,6 +68,10 @@ "vcclowalarm": "2.9700", "vcclowwarning": "3.1349" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { + "active_firmware": "X.X", + "inactive_firmware": "X.X" + }, "CHASSIS_INFO|chassis 1": { "psu_num": "2" }, diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 993e05c4806..b266b5e8345 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -684,16 +684,17 @@ "media_interface_technology" : "1550 nm DFB", "vendor_rev" : "XX", "cmis_rev" : "4.1", - "active_firmware" : "X.X", - "inactive_firmware" : "X.X", "supported_max_tx_power" : "4.0", "supported_min_tx_power" : "-22.9", "supported_max_laser_freq" : "196100", "supported_min_laser_freq" : "191300" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { + "active_firmware": "X.X", + "inactive_firmware": "X.X" + }, "TRANSCEIVER_INFO|Ethernet72": { "active_apsel_hostlane4": "N/A", - "active_firmware": "0.0", "is_replaceable": "True", "application_advertisement": "{1: {'host_electrical_interface_id': 'IB NDR', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 4, 'host_lane_count': 4, 'host_lane_assignment_options': 17}, 2: {'host_electrical_interface_id': 'IB SDR (Arch.Spec.Vol.2)', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 4, 'host_lane_count': 4, 'host_lane_assignment_options': 17}}", "host_electrical_interface": "N/A", @@ -710,7 +711,6 @@ "supported_min_laser_freq": "N/A", "serial": "serial1 ", "active_apsel_hostlane7": "N/A", - "inactive_firmware": "N/A", "active_apsel_hostlane1": "N/A", "type": "OSFP 8X Pluggable Transceiver", "cable_length": "1.0", @@ -796,6 +796,10 @@ "txbiaslowalarm": "N/A", "txbiaslowwarning": "N/A" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet72": { + "active_firmware": "0.0", + "inactive_firmware": "N/A" + }, "TRANSCEIVER_STATUS|Ethernet0": { "status": "67", "error": "Blocking Error|High temperature" diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 
e732140f833..9eda8ca249f 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -151,8 +151,6 @@ def test_format_dict_value_to_string(self): 'specification_compliance': "sm_media_interface", 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no', 'Voltage_support': 'no', 'Temp_support': 'no'}", 'nominal_bit_rate': '0', - 'active_firmware': '0.1', - 'inactive_firmware': '0.0', 'hardware_rev': '0.0', 'media_interface_code': '400ZR, DWDM, amplified', 'host_electrical_interface': '400GAUI-8 C2M (Annex 120E)', @@ -184,7 +182,6 @@ def test_format_dict_value_to_string(self): " Active App Selection Host Lane 6: 1\n" " Active App Selection Host Lane 7: 1\n" " Active App Selection Host Lane 8: 1\n" - " Active Firmware Version: 0.1\n" " Application Advertisement: 400G CR8 - Host Assign (0x1) - Copper cable - Media Assign (0x2)\n" " 200GBASE-CR4 (Clause 136) - Host Assign (Unknown) - Unknown - Media Assign (Unknown)\n" " CMIS Revision: 5.0\n" @@ -197,7 +194,6 @@ def test_format_dict_value_to_string(self): " Host Lane Assignment Options: 1\n" " Host Lane Count: 8\n" " Identifier: QSFP-DD Double Density 8X Pluggable Transceiver\n" - " Inactive Firmware Version: 0.0\n" " Length Cable Assembly(m): 0\n" " Media Interface Code: 400ZR, DWDM, amplified\n" " Media Interface Technology: C-band tunable laser\n" @@ -1113,7 +1109,7 @@ def test_firmware_commit_cli(self): def test_update_firmware_info_to_state_db(self, mock_chassis): mock_sfp = MagicMock() mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) - mock_sfp.get_transceiver_info_firmware_versions.return_value = ['a.b.c', 'd.e.f'] + mock_sfp.get_transceiver_info_firmware_versions.return_value = {'active_firmware' : 'a.b.c', 'inactive_firmware' : 'd.e.f'} sfputil.update_firmware_info_to_state_db("Ethernet0") From bf3559604b2bcb027093e2d1afa68a8de8503c15 Mon Sep 17 00:00:00 2001 From: Mati Alfaro Date: Thu, 29 Feb 2024 05:09:50 +0200 Subject: [PATCH 06/45] Add vlan validation in config interface ip add 
command (#3155) --- config/main.py | 12 +++++++++++- tests/ip_config_test.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index f2113f255fd..fc9c72f07e8 100644 --- a/config/main.py +++ b/config/main.py @@ -4513,7 +4513,11 @@ def fec(ctx, interface_name, interface_fec, verbose): def ip(ctx): """Set IP interface attributes""" pass - + +def validate_vlan_exists(db,text): + data = db.get_table('VLAN') + keys = list(data.keys()) + return text in keys # # 'add' subcommand # @@ -4577,6 +4581,12 @@ def add(ctx, interface_name, ip_addr, gw): table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") + + if table_name == "VLAN_INTERFACE": + if not validate_vlan_exists(config_db, interface_name): + ctx.fail(f"Error: {interface_name} does not exist. Vlan must be created before adding an IP address") + return + interface_entry = config_db.get_entry(table_name, interface_name) if len(interface_entry) == 0: if table_name == "VLAN_SUB_INTERFACE": diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py index 2f262a4a09c..b227c76ff32 100644 --- a/tests/ip_config_test.py +++ b/tests/ip_config_test.py @@ -13,6 +13,7 @@ import utilities_common.bgp_util as bgp_util ERROR_MSG = "Error: IP address is not valid" +NOT_EXIST_VLAN_ERROR_MSG ="does not exist" INVALID_VRF_MSG ="""\ Usage: bind [OPTIONS] @@ -43,6 +44,37 @@ def setup_class(cls): def mock_run_bgp_command(): return "" + def test_add_vlan_interface_ipv4(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config int ip add Vlan100 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan100", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + # create vlan 
4093 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["4093"], obj=db) + # config int ip add Vlan4093 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan4093", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + # config int ip add Vlan000000000000003 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan000000000000003", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + # config int ip add Vlan1.2 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan1.2", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + def test_add_del_interface_valid_ipv4(self): db = Db() runner = CliRunner() From 92220dcf10df3daaeaf193907545fbabf7941515 Mon Sep 17 00:00:00 2001 From: Saba Akram <126749695+sabakram@users.noreply.github.com> Date: Fri, 1 Mar 2024 12:29:09 +0500 Subject: [PATCH 07/45] Fix for Switch Port Modes and VLAN CLI Enhancement (#3108) * Fix for switchport mode PR * Fix for cli.py * Fix for indentation * Fix for error * Fix * Fixing indentation errors * Fix for error * Fix for failures * Fix for errors * Fix for port version * Fix for DB migrator versions * Fix for db migrator version function * Fixing versions * Fix for cli.py * Fix for vlan_test.py * Fix for failures * Fix for unexpected characters * Fixing error message * Fix for routed port --- config/main.py | 40 +- config/switchport.py | 137 ++++ config/vlan.py | 352 ++++++---- doc/Command-Reference.md | 153 +++++ scripts/db_migrator.py | 38 +- show/interfaces/__init__.py | 67 ++ .../config_db/port-an-expected.json | 3 + .../config_db/portchannel-expected.json | 5 + 
.../config_db/switchport-expected.json | 144 ++++ .../config_db/switchport-input.json | 138 ++++ tests/db_migrator_test.py | 28 + tests/interfaces_test.py | 100 +++ tests/ipv6_link_local_test.py | 2 +- tests/mock_tables/asic0/config_db.json | 1 + tests/mock_tables/config_db.json | 35 +- tests/vlan_test.py | 630 +++++++++++++++++- utilities_common/cli.py | 156 ++++- 17 files changed, 1888 insertions(+), 141 deletions(-) create mode 100644 config/switchport.py create mode 100644 tests/db_migrator_input/config_db/switchport-expected.json create mode 100644 tests/db_migrator_input/config_db/switchport-input.json diff --git a/config/main.py b/config/main.py index fc9c72f07e8..e4065142f23 100644 --- a/config/main.py +++ b/config/main.py @@ -57,6 +57,7 @@ from .config_mgmt import ConfigMgmtDPB, ConfigMgmt from . import mclag from . import syslog +from . import switchport from . import dns # mock masic APIs for unit test @@ -105,6 +106,7 @@ PORT_SPEED = "speed" PORT_TPID = "tpid" DEFAULT_TPID = "0x8100" +PORT_MODE= "switchport_mode" asic_type = None @@ -1211,6 +1213,9 @@ def config(ctx): # DNS module config.add_command(dns.dns) +# Switchport module +config.add_command(switchport.switchport) + @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @@ -4537,19 +4542,40 @@ def add(ctx, interface_name, ip_addr, gw): if interface_name is None: ctx.fail("'interface_name' is None!") - # Add a validation to check this interface is not a member in vlan before - # changing it to a router port - vlan_member_table = config_db.get_table('VLAN_MEMBER') - if (interface_is_in_vlan(vlan_member_table, interface_name)): - click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) - return - portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') if interface_is_in_portchannel(portchannel_member_table, interface_name): ctx.fail("{} is configured as a 
member of portchannel." .format(interface_name)) + + + # Add a validation to check this interface is in routed mode before + # assigning an IP address to it + + sub_intf = False + if clicommon.is_valid_port(config_db, interface_name): + is_port = True + elif clicommon.is_valid_portchannel(config_db, interface_name): + is_port = False + else: + sub_intf = True + + if not sub_intf: + interface_mode = "routed" + if is_port: + interface_data = config_db.get_entry('PORT',interface_name) + elif not is_port: + interface_data = config_db.get_entry('PORTCHANNEL',interface_name) + + if "mode" in interface_data: + interface_mode = interface_data["mode"] + + if interface_mode != "routed": + ctx.fail("Interface {} is not in routed mode!".format(interface_name)) + return + + try: ip_address = ipaddress.ip_interface(ip_addr) except ValueError as err: diff --git a/config/switchport.py b/config/switchport.py new file mode 100644 index 00000000000..a714f9427ff --- /dev/null +++ b/config/switchport.py @@ -0,0 +1,137 @@ +import click +from .utils import log +import utilities_common.cli as clicommon + +# +# 'switchport' mode ('config switchport ...') +# + + +@click.group(cls=clicommon.AbbreviationGroup, name='switchport') +def switchport(): + """Switchport mode configuration tasks""" + pass + + +@switchport.command("mode") +@click.argument("type", metavar="", required=True, type=click.Choice(["access", "trunk", "routed"])) +@click.argument("port", metavar="port", required=True) +@clicommon.pass_db +def switchport_mode(db, type, port): + """switchport mode help commands.Mode_type can be access or trunk or routed""" + + ctx = click.get_current_context() + + log.log_info("'switchport mode {} {}' executing...".format(type, port)) + mode_exists_status = True + + # checking if port name with alias exists + if clicommon.get_interface_naming_mode() == "alias": + alias = port + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + port = iface_alias_converter.alias_to_name(port) + if 
port is None: + ctx.fail("cannot find port name for alias {}".format(alias)) + + if clicommon.is_port_mirror_dst_port(db.cfgdb, port): + ctx.fail("{} is configured as mirror destination port".format(port)) + + + if clicommon.is_valid_port(db.cfgdb, port): + is_port = True + elif clicommon.is_valid_portchannel(db.cfgdb, port): + is_port = False + else: + ctx.fail("{} does not exist".format(port)) + + portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') + + if (is_port and clicommon.interface_is_in_portchannel(portchannel_member_table, port)): + ctx.fail("{} is part of portchannel!".format(port)) + + if is_port: + port_data = db.cfgdb.get_entry('PORT',port) + else: + port_data = db.cfgdb.get_entry('PORTCHANNEL',port) + + # mode type is either access or trunk + if type != "routed": + + if "mode" in port_data: + existing_mode = port_data["mode"] + else: + existing_mode = "routed" + mode_exists_status = False + if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ + (not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): + ctx.fail("Remove IP from {} to change mode!".format(port)) + + if existing_mode == "routed": + if mode_exists_status: + # if the port in an interface + if is_port: + db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) + # if not port then is a port channel + elif not is_port: + db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) + + if not mode_exists_status: + port_data["mode"] = type + if is_port: + db.cfgdb.set_entry("PORT", port, port_data) + # if not port then is a port channel + elif not is_port: + db.cfgdb.set_entry("PORTCHANNEL", port, port_data) + + if existing_mode == type: + ctx.fail("{} is already in the {} mode".format(port,type)) + else: + if existing_mode == "access" and type == "trunk": + pass + if existing_mode == "trunk" and type == "access": + if clicommon.interface_is_tagged_member(db.cfgdb,port): + ctx.fail("{} is in {} mode and have tagged member(s).\nRemove 
tagged member(s) from {} to switch to {} mode".format(port,existing_mode,port,type)) + if is_port: + db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) + # if not port then is a port channel + elif not is_port: + db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) + + click.echo("{} switched from {} to {} mode".format(port, existing_mode, type)) + + # if mode type is routed + else: + + if clicommon.interface_is_tagged_member(db.cfgdb,port): + ctx.fail("{} has tagged member(s). \nRemove them to change mode to {}".format(port,type)) + + if clicommon.interface_is_untagged_member(db.cfgdb,port): + ctx.fail("{} has untagged member. \nRemove it to change mode to {}".format(port,type)) + + if "mode" in port_data: + existing_mode = port_data["mode"] + else: + existing_mode = "routed" + mode_exists_status = False + + if not mode_exists_status: + port_data["mode"] = type + if is_port: + db.cfgdb.set_entry("PORT", port, port_data) + + # if not port then is a port channel + elif not is_port: + db.cfgdb.set_entry("PORTCHANNEL", port, port_data) + pass + + elif mode_exists_status and existing_mode == type: + ctx.fail("{} is already in {} mode".format(port,type)) + + else: + if is_port: + db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) + # if not port then is a port channel + elif not is_port: + db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) + + click.echo("{} switched from {} to {} mode".format(port,existing_mode,type)) diff --git a/config/vlan.py b/config/vlan.py index 7ace1d6d5f3..121a854c32d 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -32,28 +32,53 @@ def is_dhcp_relay_running(): @vlan.command('add') -@click.argument('vid', metavar='', required=True, type=int) +@click.argument('vid', metavar='', required=True) +@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") @clicommon.pass_db -def add_vlan(db, vid): +def add_vlan(db, vid, multiple): """Add 
VLAN""" ctx = click.get_current_context() - vlan = 'Vlan{}'.format(vid) config_db = ValidatedConfigDBConnector(db.cfgdb) + + vid_list = [] + # parser will parse the vid input if there are syntax errors it will throw error + if multiple: + vid_list = clicommon.multiple_vlan_parser(ctx, vid) + else: + if not vid.isdigit(): + ctx.fail("{} is not integer".format(vid)) + vid_list.append(int(vid)) + if ADHOC_VALIDATION: - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) - if vid == 1: - ctx.fail("{} is default VLAN".format(vlan)) # TODO: MISSING CONSTRAINT IN YANG MODEL + # loop will execute till an exception occurs + for vid in vid_list: + + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) + + #Multiple VLANs need to be referenced + vlan = 'Vlan{}'.format(vid) - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} already exists".format(vlan)) - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): - ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) - # set dhcpv4_relay table - set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) + # default vlan checker + if vid == 1: + # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is default VLAN.".format(vlan)) + + log.log_info("'vlan add {}' executing...".format(vid)) + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): + log.log_info("{} already exists".format(vlan)) + ctx.fail("{} already exists, Aborting!!!".format(vlan)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): + ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) def is_dhcpv6_relay_config_exist(db, vlan_name): @@ -74,57 +99,74 @@ def delete_db_entry(entry_name, db_connector, db_name): 
@vlan.command('del') -@click.argument('vid', metavar='', required=True, type=int) +@click.argument('vid', metavar='', required=True) +@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") @click.option('--no_restart_dhcp_relay', is_flag=True, type=click.BOOL, required=False, default=False, help="If no_restart_dhcp_relay is True, do not restart dhcp_relay while del vlan and \ require dhcpv6 relay of this is empty") @clicommon.pass_db -def del_vlan(db, vid, no_restart_dhcp_relay): +def del_vlan(db, vid, multiple, no_restart_dhcp_relay): """Delete VLAN""" - log.log_info("'vlan del {}' executing...".format(vid)) - ctx = click.get_current_context() - vlan = 'Vlan{}'.format(vid) - if no_restart_dhcp_relay: - if is_dhcpv6_relay_config_exist(db, vlan): - ctx.fail("Can't delete {} because related DHCPv6 Relay config is exist".format(vlan)) + vid_list = [] + # parser will parse the vid input if there are syntax errors it will throw error + if multiple: + vid_list = clicommon.multiple_vlan_parser(ctx, vid) + else: + if not vid.isdigit(): + ctx.fail("{} is not integer".format(vid)) + vid_list.append(int(vid)) + config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - ctx.fail("{} does not exist".format(vlan)) - - intf_table = db.cfgdb.get_table('VLAN_INTERFACE') - for intf_key in intf_table: - if ((type(intf_key) is str and intf_key == 'Vlan{}'.format(vid)) or # TODO: MISSING CONSTRAINT IN YANG MODEL - (type(intf_key) is tuple and intf_key[0] == 'Vlan{}'.format(vid))): - ctx.fail("{} can not be removed. 
First remove IP addresses assigned to this VLAN".format(vlan)) - - keys = [ (k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid) ] - - if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("VLAN ID {} can not be removed. First remove all members assigned to this VLAN.".format(vid)) - - vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') - for vxmap_key, vxmap_data in vxlan_table.items(): - if vxmap_data['vlan'] == 'Vlan{}'.format(vid): - ctx.fail("vlan: {} can not be removed. First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key)) ) + for vid in vid_list: + log.log_info("'vlan del {}' executing...".format(vid)) + + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) + + #Multiple VLANs needs to be referenced + vlan = 'Vlan{}'.format(vid) + + #Multiple VLANs needs to be checked + if no_restart_dhcp_relay: + if is_dhcpv6_relay_config_exist(db, vlan): + ctx.fail("Can't delete {} because related DHCPv6 Relay config is exist".format(vlan)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + log.log_info("{} does not exist".format(vlan)) + ctx.fail("{} does not exist, Aborting!!!".format(vlan)) + + intf_table = db.cfgdb.get_table('VLAN_INTERFACE') + for intf_key in intf_table: + if ((type(intf_key) is str and intf_key == 'Vlan{}'.format(vid)) or # TODO: MISSING CONSTRAINT IN YANG MODEL + (type(intf_key) is tuple and intf_key[0] == 'Vlan{}'.format(vid))): + ctx.fail("{} can not be removed. First remove IP addresses assigned to this VLAN".format(vlan)) + + keys = [(k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid)] + + if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("VLAN ID {} can not be removed. 
First remove all members assigned to this VLAN.".format(vid)) + + vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') + for vxmap_key, vxmap_data in vxlan_table.items(): + if vxmap_data['vlan'] == 'Vlan{}'.format(vid): + ctx.fail("vlan: {} can not be removed. First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key))) + + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, None) - # set dhcpv4_relay table - set_dhcp_relay_table('VLAN', config_db, vlan, None) + if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) + # We need to restart dhcp_relay service after dhcpv6_relay config change + if is_dhcp_relay_running(): + dhcp_relay_util.handle_restart_dhcp_relay_service() - if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): - # set dhcpv6_relay table - set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) - # We need to restart dhcp_relay service after dhcpv6_relay config change - if is_dhcp_relay_running(): - dhcp_relay_util.handle_restart_dhcp_relay_service() - delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) - delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) vlans = db.cfgdb.get_keys('VLAN') if not vlans: @@ -189,97 +231,159 @@ def vlan_member(): pass @vlan_member.command('add') -@click.argument('vid', metavar='', required=True, type=int) +@click.argument('vid', metavar='', required=True) @click.argument('port', metavar='port', required=True) -@click.option('-u', '--untagged', is_flag=True) +@click.option('-u', '--untagged', is_flag=True, help="Untagged status") +@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") 
+@click.option('-e', '--except_flag', is_flag=True, help="Skips the given vlans and adds all other existing vlans") @clicommon.pass_db -def add_vlan_member(db, vid, port, untagged): +def add_vlan_member(db, vid, port, untagged, multiple, except_flag): """Add VLAN member""" ctx = click.get_current_context() - log.log_info("'vlan member add {} {}' executing...".format(vid, port)) + # parser will parse the vid input if there are syntax errors it will throw error - vlan = 'Vlan{}'.format(vid) + vid_list = clicommon.vlan_member_input_parser(ctx, "add", db, except_flag, multiple, vid, port) + + # multiple vlan command cannot be used to add multiple untagged vlan members + if untagged and (multiple or except_flag or vid == "all"): + ctx.fail("{} cannot have more than one untagged Vlan.".format(port)) config_db = ValidatedConfigDBConnector(db.cfgdb) + if ADHOC_VALIDATION: - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - ctx.fail("{} does not exist".format(vlan)) - - if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL - alias = port - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - port = iface_alias_converter.alias_to_name(alias) - if port is None: - ctx.fail("cannot find port name for alias {}".format(alias)) - - if clicommon.is_port_mirror_dst_port(db.cfgdb, port): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is configured as mirror destination port".format(port)) - - if clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is already a member of {}".format(port, vlan)) - - if clicommon.is_valid_port(db.cfgdb, port): - is_port = True - elif clicommon.is_valid_portchannel(db.cfgdb, port): - is_port = False - else: - ctx.fail("{} does not exist".format(port)) - - if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ - 
(not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is a router interface!".format(port)) + for vid in vid_list: + + vlan = 'Vlan{}'.format(vid) + + # default vlan checker + if vid == 1: + ctx.fail("{} is default VLAN".format(vlan)) + + log.log_info("'vlan member add {} {}' executing...".format(vid, port)) + + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + log.log_info("{} does not exist".format(vlan)) + ctx.fail("{} does not exist".format(vlan)) + + if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL + alias = port + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + port = iface_alias_converter.alias_to_name(alias) + if port is None: + ctx.fail("cannot find port name for alias {}".format(alias)) + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if clicommon.is_port_mirror_dst_port(db.cfgdb, port): + ctx.fail("{} is configured as mirror destination port".format(port)) + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if clicommon.is_port_vlan_member(db.cfgdb, port, vlan): + log.log_info("{} is already a member of {}, Aborting!!!".format(port, vlan)) + ctx.fail("{} is already a member of {}, Aborting!!!".format(port, vlan)) + + + if clicommon.is_valid_port(db.cfgdb, port): + is_port = True + elif clicommon.is_valid_portchannel(db.cfgdb, port): + is_port = False + else: + ctx.fail("{} does not exist".format(port)) + + if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ + (not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) + + portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if (is_port and 
clicommon.interface_is_in_portchannel(portchannel_member_table, port)): + ctx.fail("{} is part of portchannel!".format(port)) + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if (clicommon.interface_is_untagged_member(db.cfgdb, port) and untagged): + ctx.fail("{} is already untagged member!".format(port)) + + # checking mode status of port if its access, trunk or routed + if is_port: + port_data = config_db.get_entry('PORT',port) + + # if not port then is a port channel + elif not is_port: + port_data = config_db.get_entry('PORTCHANNEL',port) + + if "mode" not in port_data: + ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) + else: + existing_mode = port_data["mode"] + + if existing_mode == "routed": + ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) + + mode_type = "access" if untagged else "trunk" + if existing_mode == "access" and mode_type == "trunk": # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is in access mode! 
Tagged Members cannot be added".format(port)) + + elif existing_mode == mode_type or (existing_mode == "trunk" and mode_type == "access"): + pass + + # in case of exception in list last added member will be shown to user - portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') - - if (is_port and clicommon.interface_is_in_portchannel(portchannel_member_table, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is part of portchannel!".format(port)) - - if (clicommon.interface_is_untagged_member(db.cfgdb, port) and untagged): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is already untagged member!".format(port)) - - try: - config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged" }) - except ValueError: - ctx.fail("{} invalid or does not exist, or {} invalid or does not exist".format(vlan, port)) + try: + config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged" }) + except ValueError: + ctx.fail("{} invalid or does not exist, or {} invalid or does not exist".format(vlan, port)) @vlan_member.command('del') -@click.argument('vid', metavar='', required=True, type=int) +@click.argument('vid', metavar='', required=True) @click.argument('port', metavar='', required=True) +@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") +@click.option('-e', '--except_flag', is_flag=True, help="Skips the given vlans and adds all other existing vlans") @clicommon.pass_db -def del_vlan_member(db, vid, port): +def del_vlan_member(db, vid, port, multiple, except_flag): """Delete VLAN member""" ctx = click.get_current_context() - log.log_info("'vlan member del {} {}' executing...".format(vid, port)) - vlan = 'Vlan{}'.format(vid) + # parser will parse the vid input if there are syntax errors it will throw error + + vid_list = clicommon.vlan_member_input_parser(ctx,"del", db, except_flag, multiple, vid, port) 
+ config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - ctx.fail("{} does not exist".format(vlan)) - - if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL - alias = port - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - port = iface_alias_converter.alias_to_name(alias) - if port is None: - ctx.fail("cannot find port name for alias {}".format(alias)) - - if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is not a member of {}".format(port, vlan)) - - try: - config_db.set_entry('VLAN_MEMBER', (vlan, port), None) - delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) - delete_db_entry("DHCP_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) - except JsonPatchConflict: - ctx.fail("{} invalid or does not exist, or {} is not a member of {}".format(vlan, port, vlan)) + for vid in vid_list: + + log.log_info("'vlan member del {} {}' executing...".format(vid, port)) + + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) + + vlan = 'Vlan{}'.format(vid) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + log.log_info("{} does not exist".format(vlan)) + ctx.fail("{} does not exist, Aborting!!!".format(vlan)) + + if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL + alias = port + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + port = iface_alias_converter.alias_to_name(alias) + if port is None: + ctx.fail("cannot find port name for alias {}".format(alias)) + + # TODO: MISSING CONSTRAINT IN YANG MODEL + if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): + ctx.fail("{} is not a member of {}".format(port, vlan)) + + + try: + 
config_db.set_entry('VLAN_MEMBER', (vlan, port), None) + delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) + delete_db_entry("DHCP_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) + except JsonPatchConflict: + ctx.fail("{} invalid or does not exist, or {} is not a member of {}".format(vlan, port, vlan)) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index fb0c94aead8..7ded58a8725 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -162,6 +162,8 @@ * [Subinterfaces](#subinterfaces) * [Subinterfaces Show Commands](#subinterfaces-show-commands) * [Subinterfaces Config Commands](#subinterfaces-config-commands) +* [Switchport Modes](#switchport-modes) + * [Switchport Modes Config Commands](#switchportmodes-config-commands) * [Syslog](#syslog) * [Syslog show commands](#syslog-show-commands) * [Syslog config commands](#syslog-config-commands) @@ -4471,6 +4473,7 @@ Subsequent pages explain each of these commands in detail. 
neighbor Show neighbor related information portchannel Show PortChannel information status Show Interface status information + switchport Show Interface switchport information tpid Show Interface tpid information transceiver Show SFP Transceiver information ``` @@ -4936,6 +4939,53 @@ This command displays some more fields such as Lanes, Speed, MTU, Type, Asymmetr Ethernet180 105,106,107,108 100G 9100 hundredGigE46 down down N/A N/A ``` + +**show interface switchport status** + +This command displays switchport modes status of the interfaces + +- Usage: + ``` + show interfaces switchport status + ``` + +- Example (show interface switchport status of all interfaces): + ``` + admin@sonic:~$ show interfaces switchport status + Interface Mode + ----------- -------- + Ethernet0 access + Ethernet4 trunk + Ethernet8 routed + + ``` + +**show interface switchport config** + +This command displays switchport modes configuration of the interfaces + +- Usage: + ``` + show interfaces switchport config + ``` + +- Example (show interface switchport config of all interfaces): + ``` + admin@sonic:~$ show interfaces switchport config + Interface Mode Untagged Tagged + ----------- -------- -------- ------- + Ethernet0 access 2 + Ethernet4 trunk 3 4,5,6 + Ethernet8 routed + + ``` + + +For details please refer [Switchport Mode HLD](https://github.com/sonic-net/SONiC/pull/912/files#diff-03597c34684d527192f76a6e975792fcfc83f54e20dde63f159399232d148397) to know more about this command. + + + + **show interfaces transceiver** This command is already explained [here](#Transceivers) @@ -10072,6 +10122,41 @@ This sub-section explains how to configure subinterfaces. Go Back To [Beginning of the document](#) or [Beginning of this section](#subinterfaces) + + +## Switchport Modes + +### Switchport Modes Config Commands + +This subsection explains how to configure switchport modes on a Port/PortChannel. 
+ +**config switchport mode ** + +Usage: +   ``` +   config switchport mode +   ``` + +- Example (Config switchport mode access on "Ethernet0"): +   ``` +   admin@sonic:~$ sudo config switchport mode access Ethernet0 +   ``` + +- Example (Config switchport mode trunk on "Ethernet4"): +   ``` +   admin@sonic:~$ sudo config switchport mode trunk Ethernet4 +   ``` + +- Example (Config switchport mode routed on "Ethernet12"): +   ``` +   admin@sonic:~$ sudo config switchport mode routed Ethernet12 +   ``` + + + +Go Back To [Beginning of the document](#) or [Beginning of this section](#switchport-modes) + + ## Syslog ### Syslog Show Commands @@ -10784,6 +10869,31 @@ This command is used to add or delete the vlan. admin@sonic:~$ sudo config vlan add 100 ``` + +**config vlan add/del -m** + +This command is used to add or delete multiple vlans via a single command. + +- Usage: +   ``` +   config vlan (add | del) -m +   ``` + +- Example01 (Create the VLAN "Vlan100, Vlan101, Vlan102, Vlan103" if these do not already exist) + +   ``` +   admin@sonic:~$ sudo config vlan add -m 100-103 +   ``` + + +- Example02 (Create the VLAN "Vlan105, Vlan106, Vlan107, Vlan108" if these do not already exist): + +   ``` +   admin@sonic:~$ sudo config vlan add -m 105,106,107,108 +   ``` + + + +**config vlan member add/del** + This command is to add or delete a member port into the already created vlan. @@ -10805,6 +10915,49 @@ This command is to add or delete a member port into the already created vlan. This command will add Ethernet4 as member of the vlan 100. ``` + +**config vlan member add/del -m -e** + +This command is to add or delete a member port into multiple already created vlans. 
+ +- Usage: +   ``` +   config vlan member add/del [-m] [-e] +   ``` + +*NOTE: -m flag multiple Vlans in range or comma separated list can be added as a member port.* + + +*NOTE: -e is used as an except flag as explained with examples below.* + + +- Example: +   ``` +   admin@sonic:~$ sudo config vlan member add -m 100-103 Ethernet0 +   This command will add Ethernet0 as member of the vlan 100, vlan 101, vlan 102, vlan 103 +   ``` + +   ``` +   admin@sonic:~$ sudo config vlan member add -m 100,101,102 Ethernet4 +   This command will add Ethernet4 as member of the vlan 100, vlan 101, vlan 102 +   ``` + +   ``` +   admin@sonic:~$ sudo config vlan member add -e -m 104,105 Ethernet8 +   Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are existing vlans. This command will add Ethernet8 as member of vlan 100, vlan 101, vlan 102, vlan 103 +   ``` + +   ``` +   admin@sonic:~$ sudo config vlan member add -e 100 Ethernet12 +   Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are existing vlans. This command will add Ethernet12 as member of vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 +   ``` + +   ``` +   admin@sonic:~$ sudo config vlan member add all Ethernet20 +   Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are existing vlans. 
This command will add Ethernet20 as member of vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 + ``` + + **config proxy_arp enabled/disabled** This command is used to enable or disable proxy ARP for a VLAN interface diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index c4d4e2da9c9..fab66624148 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -509,6 +509,39 @@ def migrate_config_db_port_table_for_auto_neg(self): self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'adv_speeds', value['speed']) elif value['autoneg'] == '0': self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'off') + + + def migrate_config_db_switchport_mode(self): + port_table = self.configDB.get_table('PORT') + portchannel_table = self.configDB.get_table('PORTCHANNEL') + vlan_member_table = self.configDB.get_table('VLAN_MEMBER') + + vlan_member_keys= [] + for _,key in vlan_member_table: + vlan_member_keys.append(key) + + for p_key, p_value in port_table.items(): + if 'mode' in p_value: + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format("PORT", p_key), 'mode', p_value['mode']) + else: + if p_key in vlan_member_keys: + p_value["mode"] = "trunk" + self.configDB.set_entry("PORT", p_key, p_value) + else: + p_value["mode"] = "routed" + self.configDB.set_entry("PORT", p_key, p_value) + + for pc_key, pc_value in portchannel_table.items(): + if 'mode' in pc_value: + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format("PORTCHANNEL", pc_key), 'mode', pc_value['mode']) + else: + if pc_key in vlan_member_keys: + pc_value["mode"] = "trunk" + self.configDB.set_entry("PORTCHANNEL", pc_key, pc_value) + else: + pc_value["mode"] = "routed" + self.configDB.set_entry("PORTCHANNEL", pc_key, pc_value) + def migrate_qos_db_fieldval_reference_remove(self, table_list, db, db_num, db_delimeter): for pair in table_list: @@ -981,6 +1014,7 @@ def version_3_0_0(self): """ log.log_info('Handling version_3_0_0') 
self.migrate_config_db_port_table_for_auto_neg() + self.migrate_config_db_switchport_mode() self.set_version('version_3_0_1') return 'version_3_0_1' @@ -996,7 +1030,9 @@ def version_3_0_1(self): for name, data in portchannel_table.items(): data['lacp_key'] = 'auto' self.configDB.set_entry('PORTCHANNEL', name, data) + self.migrate_config_db_switchport_mode() self.set_version('version_3_0_2') + return 'version_3_0_2' def version_3_0_2(self): @@ -1132,7 +1168,7 @@ def version_4_0_3(self): Version 4_0_3. """ log.log_info('Handling version_4_0_3') - + self.set_version('version_202305_01') return 'version_202305_01' diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index a5a3734664f..497950b80ed 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -797,3 +797,70 @@ def fec_status(interfacename, namespace, display, verbose): cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) + + +# +# switchport group (show interfaces switchport ...) 
+# +@interfaces.group(name='switchport', cls=clicommon.AliasedGroup) +def switchport(): + """Show interface switchport information""" + pass + + +@switchport.command(name="config") +@clicommon.pass_db +def switchport_mode_config(db): + """Show interface switchport config information""" + + port_data = list(db.cfgdb.get_table('PORT').keys()) + portchannel_data = list(db.cfgdb.get_table('PORTCHANNEL').keys()) + + portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') + + for interface in port_data: + if clicommon.interface_is_in_portchannel(portchannel_member_table,interface): + port_data.remove(interface) + + + keys = port_data + portchannel_data + + def tablelize(keys): + table = [] + + for key in natsorted(keys): + r = [clicommon.get_interface_name_for_display(db, key), clicommon.get_interface_switchport_mode(db,key), clicommon.get_interface_untagged_vlan_members(db,key), clicommon.get_interface_tagged_vlan_members(db,key)] + table.append(r) + + return table + + header = ['Interface', 'Mode', 'Untagged', 'Tagged'] + click.echo(tabulate(tablelize(keys), header, tablefmt="simple", stralign='left')) + +@switchport.command(name="status") +@clicommon.pass_db +def switchport_mode_status(db): + """Show interface switchport status information""" + + port_data = list(db.cfgdb.get_table('PORT').keys()) + portchannel_data = list(db.cfgdb.get_table('PORTCHANNEL').keys()) + + portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') + + for interface in port_data: + if clicommon.interface_is_in_portchannel(portchannel_member_table,interface): + port_data.remove(interface) + + keys = port_data + portchannel_data + + def tablelize(keys): + table = [] + + for key in natsorted(keys): + r = [clicommon.get_interface_name_for_display(db, key), clicommon.get_interface_switchport_mode(db,key)] + table.append(r) + + return table + + header = ['Interface', 'Mode'] + click.echo(tabulate(tablelize(keys), header,tablefmt="simple", stralign='left')) diff --git 
a/tests/db_migrator_input/config_db/port-an-expected.json b/tests/db_migrator_input/config_db/port-an-expected.json index 1ef2cf49163..14bdc415f4f 100644 --- a/tests/db_migrator_input/config_db/port-an-expected.json +++ b/tests/db_migrator_input/config_db/port-an-expected.json @@ -5,6 +5,7 @@ "description": "etp1a", "mtu": "9100", "alias": "etp1a", + "mode": "routed", "pfc_asym": "off", "speed": "10000", "fec": "none", @@ -18,6 +19,7 @@ "admin_status": "up", "mtu": "9100", "alias": "etp1b", + "mode": "routed", "pfc_asym": "off", "speed": "25000", "fec": "none", @@ -30,6 +32,7 @@ "admin_status": "up", "mtu": "9100", "alias": "etp2a", + "mode": "routed", "pfc_asym": "off", "speed": "50000", "fec": "none" diff --git a/tests/db_migrator_input/config_db/portchannel-expected.json b/tests/db_migrator_input/config_db/portchannel-expected.json index 2644e5f4e9d..874212b2f75 100644 --- a/tests/db_migrator_input/config_db/portchannel-expected.json +++ b/tests/db_migrator_input/config_db/portchannel-expected.json @@ -3,6 +3,7 @@ "admin_status": "up", "members@": "Ethernet0,Ethernet4", "min_links": "2", + "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -10,6 +11,7 @@ "admin_status": "up", "members@": "Ethernet8,Ethernet12", "min_links": "2", + "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -17,6 +19,7 @@ "admin_status": "up", "members@": "Ethernet16", "min_links": "1", + "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -24,12 +27,14 @@ "admin_status": "up", "members@": "Ethernet20,Ethernet24", "min_links": "2", + "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel9999": { "admin_status": "up", "mtu": "9100", + "mode": "routed", "lacp_key": "auto" }, "VERSIONS|DATABASE": { diff --git a/tests/db_migrator_input/config_db/switchport-expected.json b/tests/db_migrator_input/config_db/switchport-expected.json new file mode 100644 index 00000000000..812abbd58fd --- /dev/null +++ 
b/tests/db_migrator_input/config_db/switchport-expected.json @@ -0,0 +1,144 @@ +{ + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "fortyGigE0/0", + "index": "0", + "lanes": "25,26,27,28", + "mode": "trunk", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "index": "1", + "lanes": "29,30,31,32", + "mode": "routed", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "index": "2", + "lanes": "33,34,35,36", + "mode": "trunk", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "fortyGigE0/12", + "index": "3", + "lanes": "37,38,39,40", + "mode": "access", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "fortyGigE0/16", + "index": "4", + "lanes": "45,46,47,48", + "mode": "routed", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": "fortyGigE0/20", + "index": "5", + "lanes": "41,42,43,44", + "mode": "trunk", + "mtu": "9100", + "speed": "40000" + }, + + "VLAN|Vlan2": { + "vlanid": "2" + }, + "VLAN|Vlan3": { + "vlanid": "3" + }, + "VLAN|Vlan4": { + "vlanid": "4" + }, + "VLAN|Vlan5": { + "vlanid": "5" + }, + "VLAN|Vlan6": { + "vlanid": "6" + }, + "VLAN|Vlan7": { + "vlanid": "7" + }, + + + "VLAN_MEMBER|Vlan2|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan3|Ethernet8": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan4|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan6|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan6|Ethernet8": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan7|Ethernet8": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan5|Ethernet8": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan3|PortChannel0003": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan8|PortChannel0002": { + "tagging_mode": "tagged" + 
}, + "VLAN_MEMBER|Vlan9|PortChannel0002": { + "tagging_mode": "tagged" + }, + + "PORTCHANNEL|PortChannel0001": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "access", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0002": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "trunk", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0003": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "trunk", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0004": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "routed", + "mtu": "9100" + }, + + "VERSIONS|DATABASE": { + "VERSION": "version_3_0_1" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/switchport-input.json b/tests/db_migrator_input/config_db/switchport-input.json new file mode 100644 index 00000000000..c1ad306ce4d --- /dev/null +++ b/tests/db_migrator_input/config_db/switchport-input.json @@ -0,0 +1,138 @@ +{ + "PORT|Ethernet0": { + "admin_status": "up", + "alias": "fortyGigE0/0", + "index": "0", + "lanes": "25,26,27,28", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "index": "1", + "lanes": "29,30,31,32", + "mode": "routed", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "fortyGigE0/12", + "index": "3", + "lanes": "37,38,39,40", + "mode": "access", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet16": { + "admin_status": "up", + "alias": "fortyGigE0/16", + "index": "4", + "lanes": "45,46,47,48", + "mtu": "9100", + "speed": "40000" + }, + "PORT|Ethernet20": { + "admin_status": "up", + "alias": 
"fortyGigE0/20", + "index": "5", + "lanes": "41,42,43,44", + "mode": "trunk", + "mtu": "9100", + "speed": "40000" + }, + "VLAN|Vlan2": { + "vlanid": "2" + }, + "VLAN|Vlan3": { + "vlanid": "3" + }, + "VLAN|Vlan4": { + "vlanid": "4" + }, + "VLAN|Vlan5": { + "vlanid": "5" + }, + "VLAN|Vlan6": { + "vlanid": "6" + }, + "VLAN|Vlan7": { + "vlanid": "7" + }, + + "VLAN_MEMBER|Vlan2|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan3|Ethernet8": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan4|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan6|Ethernet0": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan6|Ethernet8": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan7|Ethernet8": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan5|Ethernet8": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan3|PortChannel0003": { + "tagging_mode": "untagged" + }, + "VLAN_MEMBER|Vlan8|PortChannel0002": { + "tagging_mode": "tagged" + }, + "VLAN_MEMBER|Vlan9|PortChannel0002": { + "tagging_mode": "tagged" + }, + + + "PORTCHANNEL|PortChannel0001": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "access", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0002": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mode": "trunk", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0003": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mtu": "9100" + }, + "PORTCHANNEL|PortChannel0004": { + "admin_status": "up", + "fast_rate": "false", + "lacp_key": "auto", + "min_links": "1", + "mtu": "9100" + }, + + "VERSIONS|DATABASE": { + "VERSION": "version_3_0_0" + } +} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 21ca9148df6..470f613d377 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -309,6 +309,33 @@ def test_port_autoneg_migrator(self): 
assert dbmgtr.configDB.get_table('PORT') == expected_db.cfgdb.get_table('PORT') assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') + + +class TestSwitchPortMigrator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def test_switchport_mode_migrator(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'switchport-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'switchport-expected') + expected_db = Db() + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_1') + + assert dbmgtr.configDB.get_table('PORT') == expected_db.cfgdb.get_table('PORT') + assert dbmgtr.configDB.get_table('PORTCHANNEL') == expected_db.cfgdb.get_table('PORTCHANNEL') + assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') + + class TestInitConfigMigrator(object): @classmethod def setup_class(cls): @@ -874,6 +901,7 @@ def test_golden_config_hostname(self): # hostname is from minigraph.xml assert hostname == 'SONiC-Dummy' + class TestMain(object): @classmethod def setup_class(cls): diff --git a/tests/interfaces_test.py b/tests/interfaces_test.py index c3246ba0261..717dcbb4c9b 100644 --- a/tests/interfaces_test.py +++ b/tests/interfaces_test.py @@ -144,6 +144,86 @@ 1001 PortChannel1001 N/A """ + +show_interfaces_switchport_status_output="""\ +Interface Mode +--------------- ------ +Ethernet0 routed +Ethernet4 trunk +Ethernet8 routed +Ethernet12 routed +Ethernet16 trunk +Ethernet20 routed +Ethernet24 trunk +Ethernet28 trunk +Ethernet36 routed +Ethernet40 routed +Ethernet44 routed +Ethernet48 routed +Ethernet52 routed +Ethernet56 routed +Ethernet60 routed 
+Ethernet64 routed +Ethernet68 routed +Ethernet72 routed +Ethernet76 routed +Ethernet80 routed +Ethernet84 routed +Ethernet88 routed +Ethernet92 routed +Ethernet96 routed +Ethernet100 routed +Ethernet104 routed +Ethernet108 routed +Ethernet116 routed +Ethernet124 routed +PortChannel0001 routed +PortChannel0002 routed +PortChannel0003 routed +PortChannel0004 routed +PortChannel1001 trunk +""" + +show_interfaces_switchport_config_output = """\ +Interface Mode Untagged Tagged +--------------- ------ ---------- -------- +Ethernet0 routed +Ethernet4 trunk 1000 +Ethernet8 routed 1000 +Ethernet12 routed 1000 +Ethernet16 trunk 1000 +Ethernet20 routed +Ethernet24 trunk 2000 +Ethernet28 trunk 2000 +Ethernet36 routed +Ethernet40 routed +Ethernet44 routed +Ethernet48 routed +Ethernet52 routed +Ethernet56 routed +Ethernet60 routed +Ethernet64 routed +Ethernet68 routed +Ethernet72 routed +Ethernet76 routed +Ethernet80 routed +Ethernet84 routed +Ethernet88 routed +Ethernet92 routed +Ethernet96 routed +Ethernet100 routed +Ethernet104 routed +Ethernet108 routed +Ethernet116 routed +Ethernet124 routed +PortChannel0001 routed +PortChannel0002 routed +PortChannel0003 routed +PortChannel0004 routed +PortChannel1001 trunk 4000 +""" + + class TestInterfaces(object): @classmethod def setup_class(cls): @@ -337,6 +417,26 @@ def test_parse_interface_in_filter(self): assert len(intf_list) == 3 assert intf_list == ["Ethernet-BP10", "Ethernet-BP11", "Ethernet-BP12"] + + def test_show_interfaces_switchport_status(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["switchport"].commands["status"]) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert result.output == show_interfaces_switchport_status_output + + def test_show_interfaces_switchport_config(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["switchport"].commands["config"]) + print(result.exit_code) + 
print(result.output) + + assert result.exit_code == 0 + assert result.output == show_interfaces_switchport_config_output + + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/ipv6_link_local_test.py b/tests/ipv6_link_local_test.py index 50b691be6b1..bb9e53ac1ab 100644 --- a/tests/ipv6_link_local_test.py +++ b/tests/ipv6_link_local_test.py @@ -232,7 +232,7 @@ def test_vlan_member_add_on_link_local_interface(self): result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["4000", "Ethernet40"], obj=obj) print(result.output) assert result.exit_code != 0 - assert 'Error: Ethernet40 is a router interface!' in result.output + assert 'Error: Ethernet40 is in routed mode!\nUse switchport mode command to change port mode' in result.output @classmethod def teardown_class(cls): diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 4bb4c9d9e93..ffee9478f34 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -75,6 +75,7 @@ "admin_status": "up", "members@": "Ethernet0,Ethernet4", "min_links": "2", + "mode": "trunk", "mtu": "9100" }, "PORTCHANNEL|PortChannel4001": { diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 2a81f96bfac..0ef506c2887 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -30,6 +30,7 @@ "lanes": "25,26,27,28", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -41,6 +42,7 @@ "lanes": "29,30,31,32", "mtu": "9100", "tpid": "0x8100", + "mode": "trunk", "pfc_asym": "off", "speed": "40000" }, @@ -52,6 +54,7 @@ "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -63,6 +66,7 @@ "lanes": "37,38,39,40", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -74,6 +78,7 @@ "lanes": "16", "mtu": "9100", 
"tpid": "0x8100", + "mode": "trunk", "pfc_asym": "off", "speed": "100" }, @@ -85,6 +90,7 @@ "lanes": "41,42,43,44", "mtu": "9100", "tpid": "0x9200", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -96,6 +102,7 @@ "lanes": "1,2,3,4", "mtu": "9100", "tpid": "0x8100", + "mode": "trunk", "pfc_asym": "off", "speed": "1000" }, @@ -107,6 +114,7 @@ "lanes": "5,6,7,8", "mtu": "9100", "tpid": "0x8100", + "mode": "trunk", "pfc_asym": "off", "speed": "1000" }, @@ -118,6 +126,7 @@ "lanes": "13,14,15,16", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -129,6 +138,7 @@ "lanes": "9,10,11,12", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "10" }, @@ -140,6 +150,7 @@ "lanes": "17,18,19,20", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -151,6 +162,7 @@ "lanes": "21,22,23,24", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -162,6 +174,7 @@ "lanes": "53,54,55,56", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -173,6 +186,7 @@ "lanes": "49,50,51,52", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -184,6 +198,7 @@ "lanes": "57,58,59,60", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -195,6 +210,7 @@ "lanes": "61,62,63,64", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -206,6 +222,7 @@ "lanes": "69,70,71,72", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -217,6 +234,7 @@ "lanes": "65,66,67,68", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -228,6 +246,7 @@ "lanes": "73,74,75,76", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -239,6 +258,7 @@ "lanes": 
"77,78,79,80", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -250,6 +270,7 @@ "lanes": "109,110,111,112", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -261,6 +282,7 @@ "lanes": "105,106,107,108", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -272,6 +294,7 @@ "lanes": "113,114,115,116", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -283,6 +306,7 @@ "lanes": "117,118,119,120", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -294,6 +318,7 @@ "lanes": "125,126,127,128", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -304,6 +329,7 @@ "lanes": "121,122,123,124", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -314,6 +340,7 @@ "lanes": "81,82,83,84", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -324,6 +351,7 @@ "lanes": "85,86,87,88", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -335,6 +363,7 @@ "lanes": "93,94,95,96", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -346,6 +375,7 @@ "lanes": "89,90,91,92", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -357,6 +387,7 @@ "lanes": "101,102,103,104", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -368,6 +399,7 @@ "lanes": "97,98,99,100", "mtu": "9100", "tpid": "0x8100", + "mode": "routed", "pfc_asym": "off", "speed": "40000", "fec" : "auto" @@ -671,7 +703,8 @@ "members@": "Ethernet32", "min_links": "1", "tpid": "0x8100", - "mtu": "9100" + "mtu": "9100", + "mode": "trunk" }, "PORTCHANNEL|PortChannel0001": { "admin_status": "up", diff --git 
a/tests/vlan_test.py b/tests/vlan_test.py index 436e309281d..4d7ed0e947c 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -134,6 +134,124 @@ +-----------+-----------------+-----------------+----------------+-------------+ """ + +test_config_add_del_multiple_vlan_and_vlan_member_output="""\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1002 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1003 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + +test_config_add_del_add_vlans_and_add_all_vlan_member_output="""\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | 
untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | +| | | Ethernet20 | tagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1002 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1003 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet20 | tagged | enabled | +| | fc02:1011::1/64 | Ethernet24 | untagged | | +| | | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | Ethernet20 | tagged | disabled | +| | | PortChannel1001 | tagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + +test_config_add_del_add_vlans_and_add_vlans_member_except_vlan_output = """\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1002 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet20 | tagged | enabled | +| | fc02:1011::1/64 | 
Ethernet24 | untagged | | +| | | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + +test_config_add_del_add_vlans_and_add_vlans_member_except_vlan__after_del_member_output = """\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1002 | | Ethernet20 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + +test_config_add_del_vlan_and_vlan_member_with_switchport_modes_output = """\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 
192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | +| | | Ethernet20 | tagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | untagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + config_add_del_vlan_and_vlan_member_in_alias_mode_output="""\ +-----------+-----------------+-----------------+----------------+-------------+ | VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | @@ -155,6 +273,28 @@ """ + +test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types_output = """\ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | 
++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +""" + + class TestVlan(object): _old_run_bgp_command = None @classmethod @@ -236,7 +376,7 @@ def test_config_vlan_add_vlan_with_invalid_vlanid(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output def test_config_vlan_add_vlan_with_exist_vlanid(self): runner = CliRunner() @@ -252,7 +392,7 @@ def test_config_vlan_del_vlan_with_invalid_vlanid(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output def test_config_vlan_del_vlan_with_nonexist_vlanid(self): runner = CliRunner() @@ -262,13 +402,80 @@ def test_config_vlan_del_vlan_with_nonexist_vlanid(self): assert result.exit_code != 0 assert "Error: Vlan1001 does not exist" in result.output + + def test_config_vlan_add_vlan_with_multiple_vlanids(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["10,20,30,40", "--multiple"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + def test_config_vlan_add_vlan_with_multiple_vlanids_with_range(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["10-20", "--multiple"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + def test_config_vlan_add_vlan_with_multiple_vlanids_with_range_and_multiple_ids(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + result = 
runner.invoke(config.config.commands["vlan"].commands["add"], ["10-15,20,25,30", "--multiple"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + def test_config_vlan_add_vlan_with_wrong_range(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["15-10", "--multiple"]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "15 is greater than 10. List cannot be generated" in result.output + + def test_config_vlan_add_vlan_range_with_default_vlan(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1-10", "--multiple"]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Vlan1 is default vlan" in result.output + + def test_config_vlan_add_vlan_is_digit_fail(self): + runner = CliRunner() + vid = "test_fail_case" + result = runner.invoke(config.config.commands["vlan"].commands["add"], [vid]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "{} is not integer".format(vid) in result.output + + def test_config_vlan_add_vlan_is_default_vlan(self): + runner = CliRunner() + default_vid = "1" + vlan = "Vlan{}".format(default_vid) + result = runner.invoke(config.config.commands["vlan"].commands["add"], [default_vid]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "{} is default VLAN.".format(vlan) in result.output + + def test_config_vlan_del_vlan_does_not_exist(self): + runner = CliRunner() + vid = "3010" + vlan = "Vlan{}".format(vid) + result = runner.invoke(config.config.commands["vlan"].commands["del"], [vid]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "{} does not exist".format(vlan) in result.output + def test_config_vlan_add_member_with_invalid_vlanid(self): runner = CliRunner() result = 
runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["4096", "Ethernet4"]) print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output def test_config_vlan_add_member_with_nonexist_vlanid(self): runner = CliRunner() @@ -294,8 +501,16 @@ def test_config_vlan_add_nonexist_port_member(self): assert result.exit_code != 0 assert "Error: Ethernet3 does not exist" in result.output + def test_config_vlan_add_nonexist_portchannel_member(self): runner = CliRunner() + #switch port mode for PortChannel1011 to trunk mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "PortChannel1011"]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChannel1011 does not exist" in result.output + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], \ ["1000", "PortChannel1011"]) print(result.exit_code) @@ -303,6 +518,7 @@ def test_config_vlan_add_nonexist_portchannel_member(self): assert result.exit_code != 0 assert "Error: PortChannel1011 does not exist" in result.output + def test_config_vlan_add_portchannel_member(self): runner = CliRunner() db = Db() @@ -313,6 +529,7 @@ def test_config_vlan_add_portchannel_member(self): print(result.output) assert result.exit_code == 0 + # show output result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) print(result.exit_code) @@ -329,7 +546,7 @@ def test_config_vlan_add_rif_portchannel_member(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: PortChannel0001 is a router interface!" 
in result.output + assert "Error: PortChannel0001 is in routed mode!\nUse switchport mode command to change port mode" in result.output def test_config_vlan_with_vxlanmap_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() @@ -457,6 +674,22 @@ def test_config_add_del_vlan_and_vlan_member(self, mock_restart_dhcp_relay_servi print(result.output) assert result.exit_code == 0 + # add Ethernet20 to vlan 1001 but Ethernet20 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "Ethernet20", "--untagged"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure Ethernet20 from routed to access mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to access mode" in result.output + # add Ethernet20 to vlan 1001 result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["1001", "Ethernet20", "--untagged"], obj=db) @@ -502,6 +735,22 @@ def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dh print(result.output) assert result.exit_code == 0 + # add etp6 to vlan 1001 but etp6 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "etp6", "--untagged"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure etp6 from routed to access mode + result = 
runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "etp6"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to access mode" in result.output + # add etp6 to vlan 1001 result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["1001", "etp6", "--untagged"], obj=db) @@ -538,6 +787,375 @@ def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dh os.environ['SONIC_CLI_IFACE_MODE'] = "default" + + def test_config_add_del_multiple_vlan_and_vlan_member(self,mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001,1002,1003 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002,1003","--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001,1002,1003", "Ethernet20", "--multiple"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure Ethernet20 from routed to trunk mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to trunk mode" in result.output + + # add Ethernet20 to vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001,1002,1003", "Ethernet20", "--multiple"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) 
+ assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.output) + assert result.output == test_config_add_del_multiple_vlan_and_vlan_member_output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1001-1003", "Ethernet20", "--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add del 1001 + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1003","--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_vlan_brief_output + + def test_config_add_del_add_vlans_and_add_vlans_member_except_vlan(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001,1002 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002","--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1000,4000", "Ethernet20", "--multiple", "--except_flag"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure Ethernet20 from routed to trunk mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) + print(result.exit_code) + 
print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to trunk mode" in result.output + + # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1000,4000", "Ethernet20", "--multiple", "--except_flag"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.output) + assert result.output == test_config_add_del_add_vlans_and_add_vlans_member_except_vlan_output + + # remove vlan member except some + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1001,1002", "Ethernet20", "--multiple", "--except_flag"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == test_config_add_del_add_vlans_and_add_vlans_member_except_vlan__after_del_member_output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1001,1002", "Ethernet20", "--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # del 1001,1002 + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1002","--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_vlan_brief_output + + + def 
test_config_add_del_add_vlans_and_add_all_vlan_member(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002,1003","--multiple"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["all", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure Ethernet20 from routed to access mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to trunk mode" in result.output + + # add Ethernet20 to vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["all", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.output) + assert result.output == test_config_add_del_add_vlans_and_add_all_vlan_member_output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["all", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add del 1001 + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1003","--multiple"], obj=db) + 
print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_vlan_brief_output + + def test_config_add_del_vlan_and_vlan_member_with_switchport_modes(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add Ethernet20 to vlan 1001 but Ethernet20 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "Ethernet20", "--untagged"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + + # configure Ethernet20 from routed to access mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from routed to access mode" in result.output + + # add Ethernet20 to vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "Ethernet20", "--untagged"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + + # add Ethernet20 to vlan 1001 as tagged member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1000", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert 
result.exit_code != 0 + assert "Ethernet20 is in access mode! Tagged Members cannot be added" in result.output + + # configure Ethernet20 from access to trunk mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from access to trunk mode" in result.output + + # add Ethernet20 to vlan 1001 as tagged member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1000", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.output) + assert result.output == test_config_add_del_vlan_and_vlan_member_with_switchport_modes_output + + # configure Ethernet20 from trunk to routed mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Ethernet20 has tagged member(s). \nRemove them to change mode to routed" in result.output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1000", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # configure Ethernet20 from trunk to routed mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Ethernet20 has untagged member. 
\nRemove it to change mode to routed" in result.output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1001", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # configure Ethernet20 from trunk to routed mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet20 switched from trunk to routed mode" in result.output + + # add del 1001 + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_vlan_brief_output + + + def test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types(self, mock_restart_dhcp_relay_service): + runner = CliRunner() + db = Db() + + # add vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # add Ethernet64 to vlan 1001 but Ethernet64 is in routed mode will give error + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "Ethernet64"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Ethernet64 is in routed mode!\nUse switchport mode command to change port mode" in result.output + + # configure Ethernet64 from routed to trunk mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet64"], obj=db) + 
print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet64 switched from routed to trunk mode" in result.output + + # add Ethernet64 to vlan 1001 + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["1001", "Ethernet64"], obj=db) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + + # configure Ethernet64 from routed to access mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet64"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Ethernet64 is in trunk mode and have tagged member(s).\nRemove tagged member(s) from Ethernet64 to switch to access mode" in result.output + + # remove vlan member + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], + ["1001", "Ethernet64"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + # configure Ethernet64 from routed to access mode + result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet64"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "Ethernet64 switched from trunk to access mode" in result.output + + # show output + result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types_output + + def test_config_vlan_proxy_arp_with_nonexist_vlan_intf_table(self): modes = ["enabled", "disabled"] runner = CliRunner() @@ -626,8 +1244,8 @@ def test_config_set_router_port_on_member_interface(self): result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Ethernet4", 
"10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 - assert 'Interface Ethernet4 is a member of vlan' in result.output + assert result.exit_code != 0 + assert 'Interface Ethernet4 is not in routed mode!' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 9d3cdae7109..af3ca849037 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -269,6 +269,111 @@ def is_port_vlan_member(config_db, port, vlan): return False + +def vlan_range_list(ctx, vid_range: str) -> list: + + vid1, vid2 = map(int, vid_range.split("-")) + + if vid1 == 1 or vid2 == 1: + ctx.fail("Vlan1 is default vlan") + + if vid1 >= vid2: + ctx.fail("{} is greater than {}. List cannot be generated".format(vid1,vid2)) + + if is_vlanid_in_range(vid1) and is_vlanid_in_range(vid2): + return list(range(vid1, vid2+1)) + else: + ctx.fail("Invalid VLAN ID must be in (2-4094)") + + +def multiple_vlan_parser(ctx, s_input: str) -> list: + + vlan_list = [] + + vlan_map = map(str, s_input.replace(" ", "").split(",")) + for vlan in vlan_map: + if "-" in vlan: + vlan_list += vlan_range_list(ctx, vlan) + elif vlan.isdigit() and int(vlan) not in vlan_list: + vlan_list.append(int(vlan)) + elif not vlan.isdigit(): + ctx.fail("{} is not integer".format(vlan)) + + vlan_list.sort() + return vlan_list + + +def get_existing_vlan_id(db) -> list: + existing_vlans = [] + vlan_data = db.cfgdb.get_table('VLAN') + + for i in vlan_data.keys(): + existing_vlans.append(int(i.strip("Vlan"))) + + return sorted(existing_vlans) + +def get_existing_vlan_id_on_interface(db,port) -> list: + intf_vlans = [] + vlan_member_data = db.cfgdb.get_table('VLAN_MEMBER') + + for (k,v) in vlan_member_data.keys(): + if v == port: + intf_vlans.append(int(k.strip("Vlan"))) + + return sorted(intf_vlans) + + +def vlan_member_input_parser(ctx, command_mode, db, except_flag, multiple, vid, port) 
-> list: + vid_list = [] + if vid == "all": + if command_mode == "add": + return get_existing_vlan_id(db) # config vlan member add + if command_mode == "del": + return get_existing_vlan_id_on_interface(db,port) # config vlan member del + + if multiple: + vid_list = multiple_vlan_parser(ctx, vid) + + if except_flag: + if command_mode == "add": + comp_list = get_existing_vlan_id(db) # config vlan member add + + elif command_mode == "del": + comp_list = get_existing_vlan_id_on_interface(db,port) # config vlan member del + + if multiple: + for i in vid_list: + if i in comp_list: + comp_list.remove(i) + + else: + if not vid.isdigit(): + ctx.fail("Vlan is not integer.") + vid = int(vid) + if vid in comp_list: + comp_list.remove(vid) + vid_list = comp_list + + elif not multiple: + # if entered vlan is not a integer + if not vid.isdigit(): + ctx.fail("Vlan is not integer.") + vid_list.append(int(vid)) + + # sorting the vid_list + vid_list.sort() + return vid_list + +def interface_is_tagged_member(db, interface_name): + """ Check if interface has tagged members i.e. 
is in trunk mode""" + vlan_member_table = db.get_table('VLAN_MEMBER') + + for key, val in vlan_member_table.items(): + if(key[1] == interface_name): + if (val['tagging_mode'] == 'tagged'): + return True + return False + def interface_is_in_vlan(vlan_member_table, interface_name): """ Check if an interface is in a vlan """ for _,intf in vlan_member_table: @@ -309,6 +414,55 @@ def is_pc_router_interface(config_db, pc): return False +def get_vlan_id(vlan): + vlan_prefix, vid = vlan.split('Vlan') + return vid + +def get_interface_name_for_display(db ,interface): + interface_naming_mode = get_interface_naming_mode() + iface_alias_converter = InterfaceAliasConverter(db) + if interface_naming_mode == "alias" and interface: + return iface_alias_converter.name_to_alias(interface) + return interface + +def get_interface_untagged_vlan_members(db,interface): + untagged_vlans = [] + vlan_member = db.cfgdb.get_table('VLAN_MEMBER') + + for member in natsorted(list(vlan_member.keys())): + interface_vlan, interface_name = member + + if interface == interface_name and vlan_member[member]['tagging_mode'] == 'untagged': + untagged_vlans.append(get_vlan_id(interface_vlan)) + + return "\n".join(untagged_vlans) + +def get_interface_tagged_vlan_members(db,interface): + tagged_vlans = [] + formatted_tagged_vlans = [] + vlan_member = db.cfgdb.get_table('VLAN_MEMBER') + + for member in natsorted(list(vlan_member.keys())): + interface_vlan, interface_name = member + + if interface == interface_name and vlan_member[member]['tagging_mode'] == 'tagged': + tagged_vlans.append(get_vlan_id(interface_vlan)) + + for i in range(len(tagged_vlans)//5+1): + formatted_tagged_vlans.append(" ,".join([str(x) for x in tagged_vlans[i*5:(i+1)*5]])) + + return "\n".join(formatted_tagged_vlans) + +def get_interface_switchport_mode(db, interface): + port = db.cfgdb.get_entry('PORT',interface) + portchannel = db.cfgdb.get_entry('PORTCHANNEL',interface) + switchport_mode = 'routed' + if "mode" in port: + 
switchport_mode = port['mode'] + elif "mode" in portchannel: + switchport_mode = portchannel['mode'] + return switchport_mode + def is_port_mirror_dst_port(config_db, port): """Check if port is already configured as mirror destination port """ mirror_table = config_db.get_table('MIRROR_SESSION') @@ -710,4 +864,4 @@ def remove(self): def remove_all(self): """ Remove the content of the cache for all users """ - shutil.rmtree(self.cache_directory_app) + shutil.rmtree(self.cache_directory_app) \ No newline at end of file From 5f0ffcca986804c2c402a3f17f47a3df74d2f96e Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Fri, 1 Mar 2024 16:58:15 -0800 Subject: [PATCH 08/45] [fast/warm-reboot] Put ERR message in syslog when a failure is seen (#3186) MSFT ADO: 26918588 This change is to add ERR logs generated during warm-reboot script to syslog. Currently the ERR logs are sent in stdout and a corresponding entry is not added to syslog. This makes it difficult to debug issues when stdout is not readily available. How I did it Modified existing error function to add the log entry to syslog. 
How to verify it Verified manually on physical device --- scripts/fast-reboot | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 922d217e3fa..f265318aa2f 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -54,6 +54,7 @@ EXIT_TEAMD_RETRY_COUNT_FAILURE=23 function error() { echo $@ >&2 + logger -p user.err "Error seen during warm-reboot shutdown process: $@" } function debug() From 9aa9eaa508902db2c90b9d37f3e0c9bbbfb657f8 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Thu, 7 Mar 2024 10:02:30 +0800 Subject: [PATCH 09/45] [config] Add Table hard dependency check (#3159) ADO: 26732148 #### What I did Add YANG hard depdency check for AAA and TACPLUS table #### How I did it Add a special check #### How to verify it Unit test --- config/main.py | 19 +++++++++++++ .../aaa_yang_hard_check.json | 28 +++++++++++++++++++ tests/config_override_test.py | 20 +++++++++++++ 3 files changed, 67 insertions(+) create mode 100644 tests/config_override_input/aaa_yang_hard_check.json diff --git a/config/main.py b/config/main.py index e4065142f23..4d2f4ed6005 100644 --- a/config/main.py +++ b/config/main.py @@ -1959,6 +1959,9 @@ def override_config_table(db, input_config_db, dry_run): # Use deepcopy by default to avoid modifying input config updated_config = update_config(current_config, ns_config_input) + # Enable YANG hard dependecy check to exit early if not satisfied + table_hard_dependency_check(updated_config) + yang_enabled = device_info.is_yang_config_validation_enabled(config_db) if yang_enabled: # The ConfigMgmt will load YANG and running @@ -2005,6 +2008,22 @@ def override_config_db(config_db, config_input): click.echo("Overriding completed. 
No service is restarted.") +def table_hard_dependency_check(config_json): + aaa_table_hard_dependency_check(config_json) + + +def aaa_table_hard_dependency_check(config_json): + AAA_TABLE = config_json.get("AAA", {}) + TACPLUS_TABLE = config_json.get("TACPLUS", {}) + + aaa_authentication_login = AAA_TABLE.get("authentication", {}).get("login", "") + tacacs_enable = "tacacs+" in aaa_authentication_login.split(",") + tacplus_passkey = TACPLUS_TABLE.get("global", {}).get("passkey", "") + if tacacs_enable and len(tacplus_passkey) == 0: + click.secho("Authentication with 'tacacs+' is not allowed when passkey not exits.", fg="magenta") + sys.exit(1) + + # # 'hostname' command # diff --git a/tests/config_override_input/aaa_yang_hard_check.json b/tests/config_override_input/aaa_yang_hard_check.json new file mode 100644 index 00000000000..61794f1ece8 --- /dev/null +++ b/tests/config_override_input/aaa_yang_hard_check.json @@ -0,0 +1,28 @@ +{ + "running_config": { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + }, + "golden_config": { + }, + "expected_config": { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + } +} diff --git a/tests/config_override_test.py b/tests/config_override_test.py index 19d2ddc197c..86360d1d9e9 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -18,6 +18,7 @@ FULL_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "full_config_override.json") PORT_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "port_config_override.json") EMPTY_TABLE_REMOVAL = os.path.join(DATA_DIR, "empty_table_removal.json") +AAA_YANG_HARD_CHECK = os.path.join(DATA_DIR, "aaa_yang_hard_check.json") RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, 
"final_config_yang_failure.json") @@ -159,6 +160,25 @@ def test_golden_config_db_empty_table_removal(self): db, config, read_data['running_config'], read_data['golden_config'], read_data['expected_config']) + def test_aaa_yang_hard_depdency_check_failure(self): + """YANG hard depdency must be satisfied""" + db = Db() + with open(AAA_YANG_HARD_CHECK, "r") as f: + read_data = json.load(f) + def read_json_file_side_effect(filename): + return read_data['golden_config'] + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + write_init_config_db(db.cfgdb, read_data['running_config']) + + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + def check_override_config_table(self, db, config, running_config, golden_config, expected_config): def read_json_file_side_effect(filename): From 995a797a0b5f4ad8dd6b0d6cc31421794a350fd0 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Thu, 7 Mar 2024 08:23:59 -0800 Subject: [PATCH 10/45] CLI to skip polling for periodic information for a port in DomInfoUpdateTask thread (#3187) * CLI to skip polling for periodic infomration for a port in DomInfoUpdateTask thread Signed-off-by: Mihir Patel * Fixed unit-test failure * Modified dom_status to dom_polling * Modified comment for failing the command --------- Signed-off-by: Mihir Patel --- config/main.py | 39 +++++++++++++++++++++++++++++++++++++++ doc/Command-Reference.md | 16 ++++++++++++++++ tests/config_xcvr_test.py | 20 ++++++++++++++++++++ 3 files changed, 75 insertions(+) diff --git a/config/main.py b/config/main.py index 4d2f4ed6005..6c0b525ca4c 100644 --- a/config/main.py +++ b/config/main.py @@ -108,6 +108,8 @@ DEFAULT_TPID = "0x8100" PORT_MODE= "switchport_mode" 
+DOM_CONFIG_SUPPORTED_SUBPORTS = ['0', '1'] + asic_type = None DSCP_RANGE = click.IntRange(min=0, max=63) @@ -5192,6 +5194,43 @@ def reset(ctx, interface_name): cmd = ['sudo', 'sfputil', 'reset', str(interface_name)] clicommon.run_command(cmd) +# +# 'dom' subcommand ('config interface transceiver dom ...') +# This command is supported only for +# 1. non-breakout ports (subport = 0 or subport field is absent in CONFIG_DB) +# 2. first subport of breakout ports (subport = 1) + +@transceiver.command() +@click.argument('interface_name', metavar='', required=True) +@click.argument('desired_config', metavar='(enable|disable)', type=click.Choice(['enable', 'disable'])) +@click.pass_context +def dom(ctx, interface_name, desired_config): + """Enable/disable DOM monitoring for SFP transceiver module""" + log.log_info("interface transceiver dom {} {} executing...".format(interface_name, desired_config)) + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + if interface_name_is_valid(config_db, interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + + port_table_entry = config_db.get_entry("PORT", interface_name) + if not port_table_entry: + ctx.fail("Interface {} does not exist".format(interface_name)) + + # We are handling port configuration only for the below mentioned scenarios + # Port is a non-breakout port (subport = 0 or subport field is absent in CONFIG_DB) + # Port is first subport of breakout ports (subport = 1) + # If the port is not in the above mentioned scenarios, then fail the command + if port_table_entry.get("subport", '0') not in DOM_CONFIG_SUPPORTED_SUBPORTS: + ctx.fail("DOM monitoring config only supported for subports {}".format(DOM_CONFIG_SUPPORTED_SUBPORTS)) + else: + config_db.mod_entry("PORT", interface_name, {"dom_polling": "disabled" if desired_config == "disable" else "enabled"}) + # # 'mpls' subgroup ('config interface mpls ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 7ded58a8725..aecab082b91 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -5331,6 +5331,22 @@ This command is used to reset an SFP transceiver Resetting port Ethernet0... OK ``` +**config interface transceiver dom** + +This command is used to configure the Digital Optical Monitoring (DOM) for an interface. + +- Usage: + ``` + config interface transceiver dom (enable | disable) + ``` + +- Examples: + ``` + user@sonic~$ sudo config interface transceiver dom Ethernet0 enable + + user@sonic~$ sudo config interface transceiver dom Ethernet0 disable + ``` + **config interface mtu (Versions >= 201904)** This command is used to configure the mtu for the Physical interface. Use the value 1500 for setting max transfer unit size to 1500 bytes. 
diff --git a/tests/config_xcvr_test.py b/tests/config_xcvr_test.py index 5043aa89d5c..6e05996db10 100644 --- a/tests/config_xcvr_test.py +++ b/tests/config_xcvr_test.py @@ -1,3 +1,4 @@ +from unittest.mock import patch import click import config.main as config import operator @@ -47,6 +48,25 @@ def test_config_tx_power(self, ctx): result = self.basic_check("tx_power", ["PortChannel0001", "11.3"], ctx, operator.ne) assert 'Invalid port PortChannel0001' in result.output + @patch("config.main.ConfigDBConnector.get_entry") + def test_dom(self, mock_get_entry, ctx): + interface_name = 'Ethernet0' + desired_config = 'enable' + + result = self.basic_check("dom", ["", desired_config], ctx, operator.ne) + assert "Interface name is invalid. Please enter a valid interface name!!" in result.output + + mock_get_entry.return_value = None + result = self.basic_check("dom", [interface_name, desired_config], ctx, operator.ne) + assert "Interface {} does not exist".format(interface_name) in result.output + + mock_get_entry.return_value = {'subport': '2'} + result = self.basic_check("dom", [interface_name, desired_config], ctx, operator.ne) + assert "DOM monitoring config only supported for subports {}".format(config.DOM_CONFIG_SUPPORTED_SUBPORTS) in result.output + + mock_get_entry.return_value = {'subport': '1'} + result = self.basic_check("dom", [interface_name, desired_config], ctx) + def basic_check(self, command_name, para_list, ctx, op=operator.eq, expect_result=0): runner = CliRunner() result = runner.invoke(config.config.commands["interface"].commands["transceiver"].commands[command_name], para_list, obj = ctx) From 24683b0c522990dfd757a3dd07ea050f96f24f2e Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Fri, 8 Mar 2024 13:32:15 +0800 Subject: [PATCH 11/45] [show] multi-asic show running test residue (#3198) --- tests/show_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/show_test.py b/tests/show_test.py index 077005b2201..4cd29ac45e5 100644 --- 
a/tests/show_test.py +++ b/tests/show_test.py @@ -113,6 +113,7 @@ def teardown_class(cls): bgp_util.run_bgp_command = cls._old_run_bgp_command os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" # change back to single asic config from .mock_tables import dbconnector from .mock_tables import mock_single_asic From 0b41a560b5993d1d5f01720f2663afb85c47b44c Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Sat, 9 Mar 2024 06:00:04 +0800 Subject: [PATCH 12/45] [config] Add YANG alerting for override (#3188) #### What I did Add alerting for YANG validation when load_minigraph during override. This is to alert early if golden config is invalid which will breaks GCU feature. #### How I did it Add alerting when `is_yang_config_validation_enabled` is not set during load_minigraph with override #### How to verify it Unit test --- config/main.py | 21 ++++++++++++++ .../multi_asic_dm_rm.json | 11 -------- .../multi_asic_feature_rm.json | 11 ++++++++ .../multi_asic_macsec_ov.json | 15 ++++++++-- .../multi_asic_missing_asic.json | 4 +-- tests/config_override_test.py | 28 +++++++++++++------ 6 files changed, 65 insertions(+), 25 deletions(-) delete mode 100644 tests/config_override_input/multi_asic_dm_rm.json create mode 100644 tests/config_override_input/multi_asic_feature_rm.json diff --git a/config/main.py b/config/main.py index 6c0b525ca4c..113c0ed57c9 100644 --- a/config/main.py +++ b/config/main.py @@ -1979,6 +1979,18 @@ def override_config_table(db, input_config_db, dry_run): validate_config_by_cm(cm, ns_config_input, "config_input") # Validate updated whole config validate_config_by_cm(cm, updated_config, "updated_config") + else: + cm = None + try: + # YANG validate of config minigraph generated + cm = ConfigMgmt(configdb=config_db) + cm.validateConfigData() + except Exception as ex: + log.log_warning("Failed to validate running config. 
Alerting: {}".format(ex)) + + # YANG validate config of minigraph generated overriden by golden config + if cm: + validate_config_by_cm_alerting(cm, updated_config, "updated_config") if dry_run: print(json.dumps(updated_config, sort_keys=True, @@ -1997,6 +2009,15 @@ def validate_config_by_cm(cm, config_json, jname): sys.exit(1) +def validate_config_by_cm_alerting(cm, config_json, jname): + tmp_config_json = copy.deepcopy(config_json) + try: + cm.loadData(tmp_config_json) + cm.validateConfigData() + except Exception as ex: + log.log_warning("Failed to validate {}. Alerting: {}".format(jname, ex)) + + def override_config_db(config_db, config_input): # Deserialized golden config to DB recognized format sonic_cfggen.FormatConverter.to_deserialized(config_input) diff --git a/tests/config_override_input/multi_asic_dm_rm.json b/tests/config_override_input/multi_asic_dm_rm.json deleted file mode 100644 index a4c0dd5fa7c..00000000000 --- a/tests/config_override_input/multi_asic_dm_rm.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "localhost": { - "DEVICE_METADATA": {} - }, - "asic0": { - "DEVICE_METADATA": {} - }, - "asic1": { - "DEVICE_METADATA": {} - } -} diff --git a/tests/config_override_input/multi_asic_feature_rm.json b/tests/config_override_input/multi_asic_feature_rm.json new file mode 100644 index 00000000000..b29cdf952f7 --- /dev/null +++ b/tests/config_override_input/multi_asic_feature_rm.json @@ -0,0 +1,11 @@ +{ + "localhost": { + "FEATURE": {} + }, + "asic0": { + "FEATURE": {} + }, + "asic1": { + "FEATURE": {} + } +} diff --git a/tests/config_override_input/multi_asic_macsec_ov.json b/tests/config_override_input/multi_asic_macsec_ov.json index ba86f6ef606..9a4a5c478ce 100644 --- a/tests/config_override_input/multi_asic_macsec_ov.json +++ b/tests/config_override_input/multi_asic_macsec_ov.json @@ -2,21 +2,30 @@ "localhost": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + 
"primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } }, "asic0": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } }, "asic1": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } } diff --git a/tests/config_override_input/multi_asic_missing_asic.json b/tests/config_override_input/multi_asic_missing_asic.json index db8ba8ec80a..a1eeb27f262 100644 --- a/tests/config_override_input/multi_asic_missing_asic.json +++ b/tests/config_override_input/multi_asic_missing_asic.json @@ -1,8 +1,8 @@ { "localhost": { - "DEVICE_METADATA": {} + "FEATURE": {} }, "asic0": { - "DEVICE_METADATA": {} + "FEATURE": {} } } diff --git a/tests/config_override_test.py b/tests/config_override_test.py index 86360d1d9e9..a46be5ef603 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -23,7 +23,7 @@ GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") -MULTI_ASIC_DEVICE_METADATA_RM = os.path.join(DATA_DIR, "multi_asic_dm_rm.json") +MULTI_ASIC_FEATURE_RM = os.path.join(DATA_DIR, "multi_asic_feature_rm.json") MULTI_ASIC_DEVICE_METADATA_GEN_SYSINFO = os.path.join(DATA_DIR, 
"multi_asic_dm_gen_sysinfo.json") MULTI_ASIC_MISSING_LOCALHOST_OV = os.path.join(DATA_DIR, "multi_asic_missing_localhost.json") MULTI_ASIC_MISSING_ASIC_OV = os.path.join(DATA_DIR, "multi_asic_missing_asic.json") @@ -105,7 +105,9 @@ def read_json_file_side_effect(filename): ['golden_config_db.json', '--dry-run']) assert result.exit_code == 0 - assert json.loads(result.output) == current_config + start_pos = result.output.find('{') + json_text = result.output[start_pos:] + assert json.loads(json_text) == current_config def test_golden_config_db_empty(self): db = Db() @@ -308,7 +310,15 @@ def read_json_file_side_effect(filename): # The profile_content was copied from MULTI_ASIC_MACSEC_OV, where all # ns sharing the same content: {"profile": {"key": "value"}} - profile_content = {"profile": {"key": "value"}} + profile_content = { + "profile": { + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" + + } + } with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): @@ -320,16 +330,16 @@ def read_json_file_side_effect(filename): for ns, config_db in cfgdb_clients.items(): assert config_db.get_config()['MACSEC_PROFILE'] == profile_content - def test_device_metadata_table_rm(self): + def test_feature_table_rm(self): def read_json_file_side_effect(filename): - with open(MULTI_ASIC_DEVICE_METADATA_RM, "r") as f: - device_metadata = json.load(f) - return device_metadata + with open(MULTI_ASIC_FEATURE_RM, "r") as f: + feature = json.load(f) + return feature db = Db() cfgdb_clients = db.cfgdb_clients for ns, config_db in cfgdb_clients.items(): - assert 'DEVICE_METADATA' in config_db.get_config() + assert 'FEATURE' in config_db.get_config() with mock.patch('config.main.read_json_file', 
mock.MagicMock(side_effect=read_json_file_side_effect)): @@ -339,7 +349,7 @@ def read_json_file_side_effect(filename): assert result.exit_code == 0 for ns, config_db in cfgdb_clients.items(): - assert 'DEVICE_METADATA' not in config_db.get_config() + assert 'FEATURE' not in config_db.get_config() def test_device_metadata_keep_sysinfo(self): def read_json_file_side_effect(filename): From b879b6588200bb6a89436511dfb4135de2d0a1e7 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Sat, 9 Mar 2024 11:40:01 +0800 Subject: [PATCH 13/45] [Bug] Fix fw_setenv illegel character issue (#3201) What I did The bug can be reproduced by the following command: root@bjw-can-7215-6:/home/admin# sonic-installer set-fips Command: /usr/bin/fw_setenv linuxargs net.ifnames=0 loopfstype=squashfs loop=image-fips-armhf-202305.88981472-dde4d1d844/fs.squashfs systemd.unified_cgroup_hierarchy=0 varlog_size=4096 loglevel=4 logs_inram=on sonic_fips=1 Error: illegal character '=' in variable name "loopfstype=squashfs" How I did it It is to set the variable linuxargs to the the environment value cmdline, as the part of the Linux Kernel Cmdline. The environment variable cannot be split. 
--- sonic_installer/bootloader/uboot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index 0490a482163..9e83f8edd7d 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -89,7 +89,7 @@ def set_fips(self, image, enable): cmdline = out.strip() cmdline = re.sub('^linuxargs=', '', cmdline) cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips - run_command(['/usr/bin/fw_setenv', 'linuxargs'] + split(cmdline)) + run_command(['/usr/bin/fw_setenv', 'linuxargs', cmdline]) click.echo('Done') def get_fips(self, image): From 7466dc4af5f8227948e713f8c4fb3f25dfc50a1d Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sun, 10 Mar 2024 17:58:10 -0700 Subject: [PATCH 14/45] Skip the validation of action in acl-loader if capability table in STATE_DB is empty (#3199) * Add skip_action_validation option to acl-loader --- acl_loader/main.py | 28 +++++++++++++++++----------- config/main.py | 2 +- tests/acl_loader_test.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 12 deletions(-) diff --git a/acl_loader/main.py b/acl_loader/main.py index e81e05d9b71..f73f3eb0399 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -413,7 +413,7 @@ def parse_acl_json(filename): raise AclLoaderException("Invalid input file %s" % filename) return yang_acl - def load_rules_from_file(self, filename): + def load_rules_from_file(self, filename, skip_action_validation=False): """ Load file with ACL rules configuration in openconfig ACL format. Convert rules to Config DB schema. 
@@ -421,9 +421,9 @@ def load_rules_from_file(self, filename): :return: """ self.yang_acl = AclLoader.parse_acl_json(filename) - self.convert_rules() + self.convert_rules(skip_action_validation) - def convert_action(self, table_name, rule_idx, rule): + def convert_action(self, table_name, rule_idx, rule, skip_validation=False): rule_props = {} if rule.actions.config.forwarding_action == "ACCEPT": @@ -452,13 +452,13 @@ def convert_action(self, table_name, rule_idx, rule): raise AclLoaderException("Unknown rule action {} in table {}, rule {}".format( rule.actions.config.forwarding_action, table_name, rule_idx)) - if not self.validate_actions(table_name, rule_props): + if not self.validate_actions(table_name, rule_props, skip_validation): raise AclLoaderException("Rule action {} is not supported in table {}, rule {}".format( rule.actions.config.forwarding_action, table_name, rule_idx)) return rule_props - def validate_actions(self, table_name, action_props): + def validate_actions(self, table_name, action_props, skip_validation=False): if self.is_table_control_plane(table_name): return True @@ -481,6 +481,11 @@ def validate_actions(self, table_name, action_props): else: aclcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.ACL_STAGE_CAPABILITY_TABLE, stage.upper())) switchcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE)) + # In the load_minigraph path, it's possible that the STATE_DB entry haven't pop up because orchagent is stopped + # before loading acl.json. 
So we skip the validation if any table is empty + if skip_validation and (not aclcapability or not switchcapability): + warning("Skipped action validation as capability table is not present in STATE_DB") + return True for action_key in dict(action_props): action_list_key = self.ACL_ACTIONS_CAPABILITY_FIELD if action_list_key not in aclcapability: @@ -699,7 +704,7 @@ def validate_rule_fields(self, rule_props): if ("ICMPV6_TYPE" in rule_props or "ICMPV6_CODE" in rule_props) and protocol != 58: raise AclLoaderException("IP_PROTOCOL={} is not ICMPV6, but ICMPV6 fields were provided".format(protocol)) - def convert_rule_to_db_schema(self, table_name, rule): + def convert_rule_to_db_schema(self, table_name, rule, skip_action_validation=False): """ Convert rules format from openconfig ACL to Config DB schema :param table_name: ACL table name to which rule belong @@ -729,7 +734,7 @@ def convert_rule_to_db_schema(self, table_name, rule): elif self.is_table_l3(table_name): rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"]) - deep_update(rule_props, self.convert_action(table_name, rule_idx, rule)) + deep_update(rule_props, self.convert_action(table_name, rule_idx, rule, skip_action_validation)) deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule)) deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule)) deep_update(rule_props, self.convert_icmp(table_name, rule_idx, rule)) @@ -761,7 +766,7 @@ def deny_rule(self, table_name): return {} # Don't add default deny rule if table is not [L3, L3V6] return rule_data - def convert_rules(self): + def convert_rules(self, skip_aciton_validation=False): """ Convert rules in openconfig ACL format to Config DB schema :return: @@ -780,7 +785,7 @@ def convert_rules(self): for acl_entry_name in acl_set.acl_entries.acl_entry: acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name] try: - rule = self.convert_rule_to_db_schema(table_name, acl_entry) + rule = self.convert_rule_to_db_schema(table_name, 
acl_entry, skip_aciton_validation) deep_update(self.rules_info, rule) except AclLoaderException as ex: error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex)) @@ -1149,8 +1154,9 @@ def update(ctx): @click.option('--session_name', type=click.STRING, required=False) @click.option('--mirror_stage', type=click.Choice(["ingress", "egress"]), default="ingress") @click.option('--max_priority', type=click.INT, required=False) +@click.option('--skip_action_validation', is_flag=True, default=False, help="Skip action validation") @click.pass_context -def full(ctx, filename, table_name, session_name, mirror_stage, max_priority): +def full(ctx, filename, table_name, session_name, mirror_stage, max_priority, skip_action_validation): """ Full update of ACL rules configuration. If a table_name is provided, the operation will be restricted in the specified table. @@ -1168,7 +1174,7 @@ def full(ctx, filename, table_name, session_name, mirror_stage, max_priority): if max_priority: acl_loader.set_max_priority(max_priority) - acl_loader.load_rules_from_file(filename) + acl_loader.load_rules_from_file(filename, skip_action_validation) acl_loader.full_update() diff --git a/config/main.py b/config/main.py index 113c0ed57c9..cef96647e9d 100644 --- a/config/main.py +++ b/config/main.py @@ -1759,7 +1759,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, update_sonic_environment() if os.path.isfile('/etc/sonic/acl.json'): - clicommon.run_command(['acl-loader', 'update', 'full', '/etc/sonic/acl.json'], display_cmd=True) + clicommon.run_command(['acl-loader', 'update', 'full', '/etc/sonic/acl.json', '--skip_action_validation'], display_cmd=True) # Load port_config.json try: diff --git a/tests/acl_loader_test.py b/tests/acl_loader_test.py index 599e47461ad..01dc8602d51 100644 --- a/tests/acl_loader_test.py +++ b/tests/acl_loader_test.py @@ -56,6 +56,36 @@ def test_validate_mirror_action(self, acl_loader): assert 
acl_loader.validate_actions("DATAACL", forward_packet_action) assert not acl_loader.validate_actions("DATAACL", drop_packet_action) + def test_load_rules_when_capability_table_is_empty(self, acl_loader): + """ + Test case to verify that acl_loader can still load dataplane acl rules when skip_action_validation + is true, and capability table in state_db is absent + """ + # Backup and empty the capability table from state_db + SWITCH_CAPABILITY = "SWITCH_CAPABILITY|switch" + if acl_loader.per_npu_statedb: + statedb = list(acl_loader.per_npu_statedb.values())[0] + else: + statedb = acl_loader.statedb + switchcapability = statedb.get_all("STATE_DB", SWITCH_CAPABILITY) + statedb.delete("STATE_DB", SWITCH_CAPABILITY) + try: + acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json'), skip_action_validation=True) + assert acl_loader.rules_info[("DATAACL", "RULE_2")] + assert acl_loader.rules_info[("DATAACL", "RULE_2")] == { + "VLAN_ID": 369, + "ETHER_TYPE": "2048", + "IP_PROTOCOL": 6, + "SRC_IP": "20.0.0.2/32", + "DST_IP": "30.0.0.3/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9998" + } + finally: + # Restore the capability table in state_db + for key, value in switchcapability.items(): + statedb.set("STATE_DB", SWITCH_CAPABILITY, key, value) + def test_vlan_id_translation(self, acl_loader): acl_loader.rules_info = {} acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json')) From 1a9261cefe6744774ba631a0b66dbd2d21ca9c9b Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Wed, 13 Mar 2024 11:44:56 -0700 Subject: [PATCH 15/45] [Techsupport]Handle SAI kv pair if present in sai common profile (#3196) ### What I did Handle the SAI key value pair if it is present in sai common profile rather than specific profile for mellanox platforms. The concept of common sai profile is introduced in https://github.com/sonic-net/sonic-buildimage/pull/18074 . 
After this the techsupport started to fail because of the absence of SAI_DUMP_STORE_PATH #### How I did it Check if the variable is not present in platform specific file and then read the common file. If the common file is not accessible due to syncd being down, fallback to default path which is hardcoded. #### How to verify it Running techsupport and ensuring it exits with code 0 --- scripts/generate_dump | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 64a8917252d..b08616444ea 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1266,8 +1266,19 @@ collect_mellanox_dfw_dumps() { trap 'handle_error $? $LINENO' ERR local platform=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_platform())") local hwsku=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_hwsku())") + local def_dump_path="/var/log/mellanox/sdk-dumps" local sdk_dump_path=`cat /usr/share/sonic/device/${platform}/${hwsku}/sai.profile|grep "SAI_DUMP_STORE_PATH"|cut -d = -f2` + if [ -z $sdk_dump_path ]; then + # If the SAI_DUMP_STORE_PATH is not found in device specific sai profile, check in common sai profile + sdk_dump_path=`docker exec syncd cat /etc/mlnx/sai-common.profile | grep "SAI_DUMP_STORE_PATH" |cut -d = -f2` + if [ -z $sdk_dump_path ]; then + # If the above two mechanisms fail e.g. when syncd is not running , fallback to default sdk dump path + sdk_dump_path=$def_dump_path + fi + fi + + if [[ ! -d $sdk_dump_path ]]; then # This would mean the SAI_DUMP_STORE_PATH is not mounted on the host and is only accessible though the container # This is a bad design and not recommended But there is nothing which restricts against it and thus the special handling From 9d53201725f950876b69971caab98b56e3fda6aa Mon Sep 17 00:00:00 2001 From: "Marty Y. 
Lok" <76118573+mlok-nokia@users.noreply.github.com> Date: Wed, 13 Mar 2024 23:20:51 -0400 Subject: [PATCH 16/45] [chassis][show-runningconfig] Fix the show runningconfiguration all issue on the Supervisor (#3194) Signed-off-by: mlok --- show/main.py | 3 ++- utilities_common/bgp_util.py | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/show/main.py b/show/main.py index c1995ad27df..2d331e7da15 100755 --- a/show/main.py +++ b/show/main.py @@ -1437,7 +1437,8 @@ def all(verbose): ns_list = multi_asic.get_namespace_list() for ns in ns_list: ns_config = get_config_json_by_namespace(ns) - ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) + if bgp_util.is_bgp_feature_state_enabled(ns): + ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) output[ns] = ns_config click.echo(json.dumps(output, indent=4)) else: diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 64054662e32..65f9a594963 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -38,6 +38,15 @@ def is_bgp_neigh_present(neighbor_ip, namespace=multi_asic.DEFAULT_NAMESPACE): return False +def is_bgp_feature_state_enabled(namespace=multi_asic.DEFAULT_NAMESPACE): + config_db = multi_asic.connect_config_db_for_ns(namespace) + bgp= config_db.get_entry("FEATURE","bgp") + if "state" in bgp: + if bgp["state"] == "enabled": + return True + return False + + def is_ipv4_address(ip_address): """ Checks if given ip is ipv4 From 125f36f3fb87c6687b0906a94d7533ffa6e0835b Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Mon, 18 Mar 2024 00:05:59 -0700 Subject: [PATCH 17/45] [ipintutil]Handle exception in show ip interfaces command (#3182) ### What I did Handle exception in show ip interfaces command when executed during config reload. Sometimes during config reload the interfaces are removed and if show ip interfaces was executed during this time we may get the below traceback. 
The interface would exist when the call multi_asic_get_ip_intf_from_ns was made but would have been removed in the subsequent for loop which tries to get ip interface data for each interface ``` show ip interfaces Traceback (most recent call last): File "/usr/local/bin/ipintutil", line 276, in main() File "/usr/local/bin/ipintutil", line 269, in main ip_intfs = get_ip_intfs(af, namespace, display) File "/usr/local/bin/ipintutil", line 232, in get_ip_intfs ip_intfs_in_ns = get_ip_intfs_in_namespace(af, namespace, display) File "/usr/local/bin/ipintutil", line 153, in get_ip_intfs_in_namespace ipaddresses = multi_asic_util.multi_asic_get_ip_intf_addr_from_ns(namespace, iface) File "/usr/local/lib/python3.9/dist-packages/utilities_common/multi_asic.py", line 186, in multi_asic_get_ip_intf_addr_from_ns ipaddresses = netifaces.ifaddresses(iface) ValueError: You must specify a valid interface name. ``` #### How I did it Adding try exception block so that if an interface is not present, it would be skipped. 
#### How to verify it Running show ip interface command and performing config reload in parallel --- scripts/ipintutil | 6 +++++- tests/mock_tables/mock_single_asic.py | 7 ++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/scripts/ipintutil b/scripts/ipintutil index 5535bce7e7f..85879972e07 100755 --- a/scripts/ipintutil +++ b/scripts/ipintutil @@ -29,6 +29,7 @@ try: mock_tables.dbconnector.load_namespace_config() else: import mock_tables.mock_single_asic + mock_tables.mock_single_asic.add_unknown_intf=True except KeyError: pass @@ -150,7 +151,10 @@ def get_ip_intfs_in_namespace(af, namespace, display): ip_intf_attr = [] if namespace != constants.DEFAULT_NAMESPACE and skip_ip_intf_display(iface, display): continue - ipaddresses = multi_asic_util.multi_asic_get_ip_intf_addr_from_ns(namespace, iface) + try: + ipaddresses = multi_asic_util.multi_asic_get_ip_intf_addr_from_ns(namespace, iface) + except ValueError: + continue if af in ipaddresses: ifaddresses = [] bgp_neighs = {} diff --git a/tests/mock_tables/mock_single_asic.py b/tests/mock_tables/mock_single_asic.py index 08c2157c9de..ac97c4bc79a 100644 --- a/tests/mock_tables/mock_single_asic.py +++ b/tests/mock_tables/mock_single_asic.py @@ -4,6 +4,8 @@ from sonic_py_common import multi_asic from utilities_common import multi_asic as multi_asic_util +add_unknown_intf=False + mock_intf_table = { '': { 'eth0': { @@ -60,6 +62,8 @@ def mock_single_asic_get_ip_intf_from_ns(namespace): interfaces = [] try: interfaces = list(mock_intf_table[namespace].keys()) + if add_unknown_intf: + interfaces.append("unknownintf") except KeyError: pass return interfaces @@ -70,7 +74,8 @@ def mock_single_asic_get_ip_intf_addr_from_ns(namespace, iface): try: ipaddresses = mock_intf_table[namespace][iface] except KeyError: - pass + if add_unknown_intf: + raise ValueError("Unknow interface") return ipaddresses From d4688a8f8b00d2299e159ec25dc26dda0536f7d8 Mon Sep 17 00:00:00 2001 From: Vadym Hlushko 
<62022266+vadymhlushko-mlnx@users.noreply.github.com> Date: Tue, 19 Mar 2024 22:54:45 +0200 Subject: [PATCH 18/45] [graceful reboot] Add the pre_reboot_hook script execution, add the watchdog arm before the reboot (#3203) Signed-off-by: vadymhlushko-mlnx --- scripts/reboot | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index 2d1cd8a87c9..b5b6a7a585a 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -1,4 +1,10 @@ #!/bin/bash + +declare -r EXIT_SUCCESS=0 +declare -r EXIT_ERROR=1 +declare -r WATCHDOG_UTIL="/usr/local/bin/watchdogutil" +declare -r PRE_REBOOT_HOOK="pre_reboot_hook" + DEVPATH="/usr/share/sonic/device" PLAT_REBOOT="platform_reboot" PLATFORM_UPDATE_REBOOT_CAUSE="platform_update_reboot_cause" @@ -34,6 +40,8 @@ PLATFORM_FWUTIL_AU_REBOOT_HANDLE="platform_fw_au_reboot_handle" REBOOT_SCRIPT_NAME=$(basename $0) REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" TAG_LATEST=no +REBOOT_FLAGS="" +FORCE_REBOOT="no" function debug() { @@ -121,9 +129,8 @@ function show_help_and_exit() echo " " echo " Available options:" echo " -h, -? : getting this help" - echo " -f : execute reboot force" - exit 0 + exit ${EXIT_SUCCESS} } function setup_reboot_variables() @@ -166,13 +173,13 @@ function check_conflict_boot_in_fw_update() FW_AU_TASK_FILE=$(compgen -G ${FW_AU_TASK_FILE_REGEX}) || true if [[ -n "${FW_AU_TASK_FILE}" ]] && [[ ! -f "${FW_AU_TASK_FILE_EXP}" ]]; then VERBOSE=yes debug "Firmware auto update scheduled for a different reboot: ${FW_AU_TASK_FILE}" - exit 1 + exit ${EXIT_ERROR} fi } function parse_options() { - while getopts "h?vf" opt; do + while getopts "h?v" opt; do case ${opt} in h|\? 
) show_help_and_exit @@ -183,6 +190,10 @@ function parse_options() t ) TAG_LATEST=no ;; + f ) + REBOOT_FLAGS+=" -f" + FORCE_REBOOT="yes" + ;; esac done } @@ -192,7 +203,7 @@ parse_options $@ # Exit if not superuser if [[ "$EUID" -ne 0 ]]; then echo "This command must be run as root" >&2 - exit 1 + exit ${EXIT_ERROR} fi debug "User requested rebooting device ..." @@ -242,6 +253,23 @@ if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_UPDATE_REBOOT_CAUSE} ]; then ${DEVPATH}/${PLATFORM}/${PLATFORM_UPDATE_REBOOT_CAUSE} fi +if [ -x ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} ]; then + debug "Executing the pre-reboot script" + ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} + EXIT_CODE=$? + if [[ ${EXIT_CODE} != ${EXIT_SUCCESS} ]]; then + if [[ "${FORCE_REBOOT}" != "yes" ]]; then + echo "Reboot is interrupted: use -f (force) to override" + exit ${EXIT_ERROR} + fi + fi +fi + +if [ -x ${WATCHDOG_UTIL} ]; then + debug "Enabling the Watchdog before reboot" + ${WATCHDOG_UTIL} arm +fi + if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then VERBOSE=yes debug "Rebooting with platform ${PLATFORM} specific tool ..." ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} $@ @@ -260,4 +288,4 @@ if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then fi VERBOSE=yes debug "Issuing OS-level reboot ..." >&2 -exec /sbin/reboot $@ +exec /sbin/reboot ${REBOOT_FLAGS} From d8541add5a8b0d42ce9365e1c1d923322d41e3f6 Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Tue, 19 Mar 2024 22:53:17 -0700 Subject: [PATCH 19/45] Update port2alias (#3217) Basically port2alias Cli became broken on multi-asic platforms after introduction of sonic-net/sonic-buildimage#10960 which removed the initialization of global DB config from portconfig.py (library side) and expects application to do it, but here application side (port2alias) was not updated accordingly. 
How I did it Add load_db_config call to port2alias for initialization --- scripts/port2alias | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/port2alias b/scripts/port2alias index 5a4ff8dd79f..c993890699a 100755 --- a/scripts/port2alias +++ b/scripts/port2alias @@ -7,6 +7,7 @@ from io import StringIO from portconfig import get_port_config from sonic_py_common import device_info from sonic_py_common import multi_asic +from utilities_common.general import load_db_config # mock the redis for unit test purposes # try: @@ -50,6 +51,7 @@ def translate_line(line, ports): def main(): (platform, hwsku) = device_info.get_platform_and_hwsku() ports = {} + load_db_config() for ns in multi_asic.get_namespace_list(): (ports_ns, _, _) = get_port_config(hwsku=hwsku, platform=platform, asic_name=ns) ports.update(ports_ns) From c149e48b8fb3179a9ea31b595a601605f19f9d03 Mon Sep 17 00:00:00 2001 From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:54:23 -0700 Subject: [PATCH 20/45] [chassis] Add chassis support for CLI "config qos reload" (#3233) * Add support for chassis in qos reload --- config/main.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index cef96647e9d..80cd094dbd4 100644 --- a/config/main.py +++ b/config/main.py @@ -2994,7 +2994,14 @@ def _qos_update_ports(ctx, ports, dry_run, json_data): for table_name in tables_multi_index: entries = config_db.get_keys(table_name) for key in entries: - port, _ = key + # Add support for chassis/multi-dut: + # on a single-dut, key = ('Ethernet136', '6') + # while on a chassis, key = ('str2-chassis-lcx-1', 'Asic0', 'Ethernet84', '5') + for element in key: + if element.startswith('Eth'): + port = element + break + assert port is not None, "Port is not found in config DB" if not port in portset_to_handle: continue config_db.set_entry(table_name, '|'.join(key), None) From 3c489ba5b6adb3ffae3ee5a8c68a969b41bdb6f3 Mon Sep 17 00:00:00 
2001 From: Deepak Singhal <115033986+deepak-singhal0408@users.noreply.github.com> Date: Fri, 22 Mar 2024 16:02:50 -0700 Subject: [PATCH 21/45] Enhance route-check for multi-asic platforms (#3216) ### What I did Enhanced route_check.py script to cover multi-asic platforms. Accordingly enhanced the test files as well. MSFT ADO: 25416673 #### How I did it Enhanced the route_check.py script to take additional optional parameter(--n/namespace). Without this parameter, the check will be run on all asics in multi-asic platforms. Different connections to DBs are modified accordingly to connect to relevant ns dbs. Result will be encapsulated under different namespace. For single asic, results will be displayed under Default-Namespace(""). testData and the testfiles are enhanced accordingly. #### How to verify it 1. Verified that all pytest UT cases are passing. 2. Verified the route_check_test.sh script on single asic and multi-asic platforms. 3. Verified Monit routecheck outputs by simulating a failure scenario on both single asic and multi-asic platforms. output from Monit Check: **Single Asic:** xxx/usr/local/bin# monit status routecheck Monit 5.20.0 uptime: 1d 20h 32m Program 'routeCheck' status Status failed monitoring status Monitored monitoring mode active on reboot start last exit value 255 last output Failure results: {{ "": { "missed_ROUTE_TABLE_routes": [ "20c0:d9b8:99:80::/64" ] } }} Failed. Look at reported mismatches above add: { "": [] } del: { "": [] } data collected Tue, 12 Dec 2023 20:30:11 ''' **Multi Asic:** ''' /bin# monit status routecheck Monit 5.20.0 uptime: 1d 23h 51m Program 'routeCheck' status Status failed monitoring status Monitored monitoring mode active on reboot start last exit value 255 last output Failure results: {{ "asic0": { "missed_ROUTE_TABLE_routes": [ "1.0.0.0/16" ] }, "asic1": { "missed_ROUTE_TABLE_routes": [ "1.0.0.0/16" ] }, "asic2": { "missed_ROUTE_TABLE_routes": [ "1.0.0.0/16" ] } }} Failed.
Look at reported mismatches above add: { "asic0": [], "asic1": [], "asic2": [] } del: { "asic0": [], "asic1": [], "asic2": [] } data collected Tue, 12 Dec 2023 23:54:23 ''' --- scripts/route_check.py | 220 +++--- scripts/route_check_test.sh | 125 +++- tests/route_check_test.py | 190 +++-- tests/route_check_test_data.py | 1182 +++++++++++++++++++++----------- 4 files changed, 1069 insertions(+), 648 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 4346d733fc0..5349acd0afc 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -50,6 +50,8 @@ from ipaddress import ip_network from swsscommon import swsscommon from utilities_common import chassis +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config APPL_DB_NAME = 'APPL_DB' ASIC_DB_NAME = 'ASIC_DB' @@ -76,6 +78,8 @@ FRR_CHECK_RETRIES = 3 FRR_WAIT_TIME = 15 +REDIS_TIMEOUT_MSECS = 0 + class Level(Enum): ERR = 'ERR' INFO = 'INFO' @@ -276,12 +280,12 @@ def is_vrf(k): return k.startswith("Vrf") -def get_routes(): +def get_appdb_routes(namespace): """ helper to read route table from APPL-DB. :return list of sorted routes with prefix ensured """ - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) print_message(syslog.LOG_DEBUG, "APPL DB connected for routes") tbl = swsscommon.Table(db, 'ROUTE_TABLE') keys = tbl.getKeys() @@ -298,15 +302,15 @@ def get_routes(): return sorted(valid_rt) -def get_route_entries(): +def get_asicdb_routes(namespace): """ helper to read present route entries from ASIC-DB and as well initiate selector for ASIC-DB:ASIC-state updates. 
:return (selector, subscriber, ) """ - db = swsscommon.DBConnector(ASIC_DB_NAME, 0) + db = swsscommon.DBConnector(ASIC_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) subs = swsscommon.SubscriberStateTable(db, ASIC_TABLE_NAME) - print_message(syslog.LOG_DEBUG, "ASIC DB connected") + print_message(syslog.LOG_DEBUG, "ASIC DB {} connected".format(namespace)) rt = [] while True: @@ -324,37 +328,42 @@ def get_route_entries(): return (selector, subs, sorted(rt)) -def is_suppress_fib_pending_enabled(): +def is_suppress_fib_pending_enabled(namespace): """ Returns True if FIB suppression is enabled, False otherwise """ - cfg_db = swsscommon.ConfigDBConnector() - cfg_db.connect() - + cfg_db = multi_asic.connect_config_db_for_ns(namespace) state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending') return state == 'enabled' -def get_frr_routes(): +def get_frr_routes(namespace): """ Read routes from zebra through CLI command :return frr routes dictionary """ + if namespace == multi_asic.DEFAULT_NAMESPACE: + v4_route_cmd = ['show', 'ip', 'route', 'json'] + v6_route_cmd = ['show', 'ipv6', 'route', 'json'] + else: + v4_route_cmd = ['show', 'ip', 'route', '-n', namespace, 'json'] + v6_route_cmd = ['show', 'ipv6', 'route', '-n', namespace, 'json'] - output = subprocess.check_output('show ip route json', shell=True) + output = subprocess.check_output(v4_route_cmd, text=True) routes = json.loads(output) - output = subprocess.check_output('show ipv6 route json', shell=True) + output = subprocess.check_output(v6_route_cmd, text=True) routes.update(json.loads(output)) + print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, routes)) return routes -def get_interfaces(): +def get_interfaces(namespace): """ helper to read interface table from APPL-DB. 
:return sorted list of IP addresses with added prefix """ - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) print_message(syslog.LOG_DEBUG, "APPL DB connected for interfaces") tbl = swsscommon.Table(db, 'INTF_TABLE') keys = tbl.getKeys() @@ -374,20 +383,20 @@ def get_interfaces(): return sorted(intf) -def filter_out_local_interfaces(keys): +def filter_out_local_interfaces(namespace, keys): """ helper to filter out local interfaces :param keys: APPL-DB:ROUTE_TABLE Routes to check. :return keys filtered out of local """ rt = [] - local_if_lst = {'eth0', 'docker0'} + local_if_lst = {'eth0', 'eth1', 'docker0'} #eth1 is added to skip route installed in AAPL_DB on packet-chassis local_if_lo = [r'tun0', r'lo', r'Loopback\d+'] chassis_local_intfs = chassis.get_chassis_local_interfaces() local_if_lst.update(set(chassis_local_intfs)) - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) tbl = swsscommon.Table(db, 'ROUTE_TABLE') for k in keys: @@ -407,20 +416,20 @@ def filter_out_local_interfaces(keys): return rt -def filter_out_voq_neigh_routes(keys): +def filter_out_voq_neigh_routes(namespace, keys): """ helper to filter out voq neigh routes. These are the routes statically added for the voq neighbors. We skip writing route entries in asic db for these. We filter out reporting error on all the host routes written on inband interface prefixed with "Ethernte-IB" - :param keys: APPL-DB:ROUTE_TABLE Routes to check. + :param namespace: Asic namespace, keys: APPL-DB:ROUTE_TABLE Routes to check. 
:return keys filtered out for voq neigh routes """ rt = [] local_if_re = [r'Ethernet-IB\d+'] - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) tbl = swsscommon.Table(db, 'ROUTE_TABLE') for k in keys: @@ -452,13 +461,13 @@ def filter_out_default_routes(lst): return upd -def filter_out_vnet_routes(routes): +def filter_out_vnet_routes(namespace, routes): """ Helper to filter out VNET routes :param routes: list of routes to filter :return filtered list of routes. """ - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', REDIS_TIMEOUT_MSECS, True, namespace) vnet_route_table = swsscommon.Table(db, 'VNET_ROUTE_TABLE') vnet_route_tunnel_table = swsscommon.Table(db, 'VNET_ROUTE_TUNNEL_TABLE') @@ -488,14 +497,14 @@ def is_dualtor(config_db): return subtype.lower() == 'dualtor' -def filter_out_standalone_tunnel_routes(routes): - config_db = swsscommon.ConfigDBConnector() - config_db.connect() +def filter_out_standalone_tunnel_routes(namespace, routes): + + config_db = multi_asic.connect_config_db_for_ns(namespace) if not is_dualtor(config_db): return routes - app_db = swsscommon.DBConnector('APPL_DB', 0) + app_db = swsscommon.DBConnector('APPL_DB', REDIS_TIMEOUT_MSECS, True, namespace) neigh_table = swsscommon.Table(app_db, 'NEIGH_TABLE') neigh_keys = neigh_table.getKeys() standalone_tunnel_route_ips = [] @@ -525,18 +534,17 @@ def filter_out_standalone_tunnel_routes(routes): return updated_routes -def check_frr_pending_routes(): +def check_frr_pending_routes(namespace): """ Check FRR routes for offload flag presence by executing "show ip route json" Returns a list of routes that have no offload flag. 
""" missed_rt = [] - retries = FRR_CHECK_RETRIES for i in range(retries): missed_rt = [] - frr_routes = get_frr_routes() + frr_routes = get_frr_routes(namespace) for _, entries in frr_routes.items(): for entry in entries: @@ -559,11 +567,11 @@ def check_frr_pending_routes(): break time.sleep(FRR_WAIT_TIME) - + print_message(syslog.LOG_DEBUG, "FRR missed routes: {}".format(missed_rt, indent=4)) return missed_rt -def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl): +def mitigate_installed_not_offloaded_frr_routes(namespace, missed_frr_rt, rt_appl): """ Mitigate installed but not offloaded FRR routes. @@ -575,7 +583,7 @@ def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl): All of the above mentioned cases must be considered as a bug, but even in that case we will report an error in the log but given that this script ensures the route is installed in the hardware it will automitigate such a bug. """ - db = swsscommon.DBConnector('APPL_STATE_DB', 0) + db = swsscommon.DBConnector('APPL_STATE_DB', REDIS_TIMEOUT_MSECS, True, namespace) response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL') for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]: fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])]) @@ -598,7 +606,7 @@ def get_soc_ips(config_db): return soc_ips -def filter_out_soc_ip_routes(routes): +def filter_out_soc_ip_routes(namespace, routes): """ Ignore ASIC only routes for SOC IPs @@ -608,8 +616,7 @@ def filter_out_soc_ip_routes(routes): will use the kernel routing table), but still provide connectivity to any external traffic in case of a link issue (since this traffic will be forwarded by the ASIC). 
""" - config_db = swsscommon.ConfigDBConnector() - config_db.connect() + config_db = multi_asic.connect_config_db_for_ns(namespace) if not is_dualtor(config_db): return routes @@ -618,7 +625,7 @@ def filter_out_soc_ip_routes(routes): if not soc_ips: return routes - + updated_routes = [] for route in routes: if route not in soc_ips: @@ -627,9 +634,9 @@ def filter_out_soc_ip_routes(routes): return updated_routes -def get_vlan_neighbors(): +def get_vlan_neighbors(namespace): """Return a list of VLAN neighbors.""" - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) print_message(syslog.LOG_DEBUG, "APPL DB connected for neighbors") tbl = swsscommon.Table(db, 'NEIGH_TABLE') neigh_entries = tbl.getKeys() @@ -645,7 +652,7 @@ def get_vlan_neighbors(): return valid_neighs -def filter_out_vlan_neigh_route_miss(rt_appl_miss, rt_asic_miss): +def filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss): """Ignore any route miss for vlan neighbor IPs.""" def _filter_out_neigh_route(routes, neighs): @@ -658,12 +665,10 @@ def _filter_out_neigh_route(routes, neighs): updated_routes.append(route) return updated_routes, ignored_routes - config_db = swsscommon.ConfigDBConnector() - config_db.connect() + config_db = multi_asic.connect_config_db_for_ns(namespace) - print_message(syslog.LOG_DEBUG, "Ignore vlan neighbor route miss") if is_dualtor(config_db): - vlan_neighs = set(get_vlan_neighbors()) + vlan_neighs = set(get_vlan_neighbors(namespace)) rt_appl_miss, ignored_rt_appl_miss = _filter_out_neigh_route(rt_appl_miss, vlan_neighs) print_message(syslog.LOG_DEBUG, "Ignored appl route miss:", json.dumps(ignored_rt_appl_miss, indent=4)) rt_asic_miss, ignored_rt_asic_miss = _filter_out_neigh_route(rt_asic_miss, vlan_neighs) @@ -672,7 +677,7 @@ def _filter_out_neigh_route(routes, neighs): return rt_appl_miss, rt_asic_miss -def check_routes(): +def check_routes(namespace): """ The heart of this script 
which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. @@ -691,85 +696,102 @@ def check_routes(): :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. """ - intf_appl_miss = [] - rt_appl_miss = [] - rt_asic_miss = [] - rt_frr_miss = [] + namespace_list = [] + if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): + namespace_list.append(namespace) + else: + namespace_list = multi_asic.get_namespace_list() + print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) results = {} - adds = [] - deletes = [] + adds = {} + deletes = {} + for namespace in namespace_list: + intf_appl_miss = [] + rt_appl_miss = [] + rt_asic_miss = [] + rt_frr_miss = [] + adds[namespace] = [] + deletes[namespace] = [] + + selector, subs, rt_asic = get_asicdb_routes(namespace) - selector, subs, rt_asic = get_route_entries() + rt_appl = get_appdb_routes(namespace) + intf_appl = get_interfaces(namespace) - rt_appl = get_routes() - intf_appl = get_interfaces() + # Diff APPL-DB routes & ASIC-DB routes + rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) - # Diff APPL-DB routes & ASIC-DB routes - rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) + # Check missed ASIC routes against APPL-DB INTF_TABLE + _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) + rt_asic_miss = filter_out_default_routes(rt_asic_miss) + rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) - # Check missed ASIC routes against APPL-DB INTF_TABLE - _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) - rt_asic_miss = filter_out_default_routes(rt_asic_miss) - rt_asic_miss = filter_out_vnet_routes(rt_asic_miss) - rt_asic_miss = filter_out_standalone_tunnel_routes(rt_asic_miss) - 
rt_asic_miss = filter_out_soc_ip_routes(rt_asic_miss) - # Check APPL-DB INTF_TABLE with ASIC table route entries - intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) + # Check APPL-DB INTF_TABLE with ASIC table route entries + intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) - if rt_appl_miss: - rt_appl_miss = filter_out_local_interfaces(rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_voq_neigh_routes(rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) - # NOTE: On dualtor environment, ignore any route miss for the - # neighbors learned from the vlan subnet. - if rt_appl_miss or rt_asic_miss: - rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(rt_appl_miss, rt_asic_miss) + # NOTE: On dualtor environment, ignore any route miss for the + # neighbors learned from the vlan subnet. + if rt_appl_miss or rt_asic_miss: + rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) - if rt_appl_miss or rt_asic_miss: - # Look for subscribe updates for a second - adds, deletes = get_subscribe_updates(selector, subs) + if rt_appl_miss or rt_asic_miss: + # Look for subscribe updates for a second + adds[namespace], deletes[namespace] = get_subscribe_updates(selector, subs) # Drop all those for which SET received - rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds) + rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds[namespace]) # Drop all those for which DEL received - rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes) + rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes[namespace]) - if rt_appl_miss: - results["missed_ROUTE_TABLE_routes"] = rt_appl_miss + if rt_appl_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_ROUTE_TABLE_routes"] = rt_appl_miss - if intf_appl_miss: - 
results["missed_INTF_TABLE_entries"] = intf_appl_miss + if intf_appl_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_INTF_TABLE_entries"] = intf_appl_miss - if rt_asic_miss: - results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + if rt_asic_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - rt_frr_miss = check_frr_pending_routes() + rt_frr_miss = check_frr_pending_routes(namespace) - if rt_frr_miss: - results["missed_FRR_routes"] = rt_frr_miss + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss + + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above") print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) - - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync") - if is_suppress_fib_pending_enabled(): - mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl) - return -1, results else: print_message(syslog.LOG_INFO, "All good!") return 0, None - def main(): """ main entry point, which mainly parses the args and call check_routes @@ -782,8 +804,18 @@ def main(): parser.add_argument('-m', "--mode", type=Level, choices=list(Level), default='ERR') parser.add_argument("-i", "--interval", type=int, default=0, help="Scan interval in seconds") parser.add_argument("-s", "--log_to_syslog", action="store_true", default=True, help="Write message to syslog") + parser.add_argument('-n','--namespace', default=multi_asic.DEFAULT_NAMESPACE, help='Verify routes for this specific namespace') args = parser.parse_args() + namespace = args.namespace + if namespace is not multi_asic.DEFAULT_NAMESPACE and not multi_asic.is_multi_asic(): + print_message(syslog.LOG_ERR, "Namespace option is not valid for a single-ASIC device") + return -1, None + + if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace not in multi_asic.get_namespace_list(): + print_message(syslog.LOG_ERR, "Namespace option is not valid. 
Choose one of {}".format(multi_asic.get_namespace_list())) + return -1, None + set_level(args.mode, args.log_to_syslog) if args.interval: @@ -797,10 +829,12 @@ def main(): interval = 1 signal.signal(signal.SIGALRM, handler) + load_db_config() while True: signal.alarm(TIMEOUT_SECONDS) - ret, res= check_routes() + ret, res= check_routes(namespace) + print_message(syslog.LOG_DEBUG, "ret={}, res={}".format(ret, res)) signal.alarm(0) if interval: diff --git a/scripts/route_check_test.sh b/scripts/route_check_test.sh index 989cbfae0bf..b78351f7a67 100755 --- a/scripts/route_check_test.sh +++ b/scripts/route_check_test.sh @@ -2,36 +2,95 @@ # add a route, interface & route-entry to simulate error # -sonic-db-cli APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null -sonic-db-cli ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null -sonic-db-cli APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null - -echo "------" -echo "expect errors!" -echo "Running Route Check..." -./route_check.py -echo "return value: $?" 
- -sonic-db-cli APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null -sonic-db-cli ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null -sonic-db-cli APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null - -# add standalone tunnel route to simulate unreachable neighbor scenario on dual ToR -# in this scenario, we expect the route mismatch to be ignored -sonic-db-cli APPL_DB hmset "NEIGH_TABLE:Vlan1000:fc02:1000::99" "neigh" "00:00:00:00:00:00" "family" "IPv6" > /dev/null -sonic-db-cli ASIC_DB hmset 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x400000000167d" "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" "SAI_PACKET_ACTION_FORWARD" > /dev/null - -echo "------" -echo "expect success on dualtor, expect error on all other devices!" -echo "Running Route Check..." -./route_check.py -echo "return value: $?" - -sonic-db-cli APPL_DB del "NEIGH_TABLE:Vlan1000:fc02:1000::99" > /dev/null -sonic-db-cli ASIC_DB del 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' > /dev/null - -echo "------" -echo "expect success!" -echo "Running Route Check..." -./route_check.py -echo "return value: $?" + +CONFIG_FILE="/etc/sonic/config_db.json" +if [ ! -e "$CONFIG_FILE" ]; then + echo "File $CONFIG_FILE not found. returning.." + exit 1 +fi + +# Extract platform and hwsku from DEVICE_METADATA using awk +platform=$(awk -F'"' '/"DEVICE_METADATA":/,/\}/{if(/"platform":/) print $4}' "$CONFIG_FILE") + +# Print the values +echo "Platform: $platform" + +PLATFORM_DIR="/usr/share/sonic/device/$platform" +if [ ! -d "$PLATFORM_DIR" ]; then + echo "Directory $PLATFORM_DIR not found. returning.." 
+ exit 1 +fi + +ASIC_CONF_FILE="$PLATFORM_DIR/asic.conf" +echo "$ASIC_CONF_FILE" +num_asic=1 + +# Check if asic.conf exists +if [ -f "$ASIC_CONF_FILE" ]; then + if grep -q "^NUM_ASIC=" "$ASIC_CONF_FILE"; then + # Extract the value of NUM_ASIC into a local variable + num_asic=$(grep "^NUM_ASIC=" "$ASIC_CONF_FILE" | cut -d'=' -f2) + else + # Print a message if NUM_ASIC is not present + echo "NUM_ASIC not found.. returning.." + exit 1 + fi +fi + +echo "num_asic: $num_asic" + +if [ "$num_asic" -gt 1 ]; then + # test on asic0 + # add a route, interface & route-entry to simulate error + # + sonic-db-cli -n asic0 APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null + sonic-db-cli -n asic0 ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null + sonic-db-cli -n asic0 APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null + + echo "------" + echo "expect errors!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" 
+ + sonic-db-cli -n asic0 APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null + sonic-db-cli -n asic0 ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null + sonic-db-cli -n asic0 APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null + +else + # add a route, interface & route-entry to simulate error + # + sonic-db-cli APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null + sonic-db-cli ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null + sonic-db-cli APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null + + echo "------" + echo "expect errors!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" 
+ + sonic-db-cli APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null + sonic-db-cli ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null + sonic-db-cli APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null + + # add standalone tunnel route to simulate unreachable neighbor scenario on dual ToR + # in this scenario, we expect the route mismatch to be ignored + sonic-db-cli APPL_DB hmset "NEIGH_TABLE:Vlan1000:fc02:1000::99" "neigh" "00:00:00:00:00:00" "family" "IPv6" > /dev/null + sonic-db-cli ASIC_DB hmset 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x400000000167d" "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" "SAI_PACKET_ACTION_FORWARD" > /dev/null + + echo "------" + echo "expect success on dualtor, expect error on all other devices!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" + + sonic-db-cli APPL_DB del "NEIGH_TABLE:Vlan1000:fc02:1000::99" > /dev/null + sonic-db-cli ASIC_DB del 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' > /dev/null + + echo "------" + echo "expect success!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" 
+fi diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 3b38add9ff7..820f0621077 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -1,14 +1,17 @@ import copy from io import StringIO import json -import os import logging -import sys import syslog +import sys import time from sonic_py_common import device_info from unittest.mock import MagicMock, patch -from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, APPL_STATE_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES +from tests.route_check_test_data import ( + APPL_DB, MULTI_ASIC, NAMESPACE, DEFAULTNS, ARGS, ASIC_DB, CONFIG_DB, + DEFAULT_CONFIG_DB, APPL_STATE_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, + UPD, FRR_ROUTES +) import pytest @@ -18,50 +21,37 @@ import route_check current_test_data = None - -tables_returned = {} selector_returned = None subscribers_returned = {} +db_conns = {} def set_test_case_data(ctdata): - """ - Setup global variables for each test case - """ - global current_test_data, tables_returned, selector_returned, subscribers_returned - + global current_test_data, db_conns, selector_returned, subscribers_returned current_test_data = ctdata - tables_returned = {} - selector_returned = None subscribers_returned = {} - def recursive_update(d, t): - assert (type(t) is dict) + assert type(t) is dict for k in t.keys(): if type(t[k]) is not dict: d.update(t) return - if k not in d: d[k] = {} recursive_update(d[k], t[k]) - class Table: - def __init__(self, db, tbl): self.db = db self.tbl = tbl - self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db, tbl])) - # print("Table:init: db={} tbl={} data={}".format(db, tbl, json.dumps(self.data, indent=4))) - + self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db["namespace"], db["name"], tbl])) def update(self): t = copy.deepcopy(self.get_val(current_test_data.get(UPD, {}), - [self.db, self.tbl, OP_SET])) + 
[self.db["namespace"], self.db["name"], self.tbl, OP_SET])) drop = copy.deepcopy(self.get_val(current_test_data.get(UPD, {}), - [self.db, self.tbl, OP_DEL])) + [self.db["namespace"], self.db["name"], self.tbl, OP_DEL])) if t: recursive_update(self.data, t) @@ -69,41 +59,41 @@ def update(self): self.data.pop(k, None) return (list(t.keys()), list(drop.keys())) - def get_val(self, d, keys): for k in keys: d = d[k] if k in d else {} return d - def getKeys(self): return list(self.data.keys()) - def get(self, key): ret = copy.deepcopy(self.data.get(key, {})) return (True, ret) - def hget(self, key, field): ret = copy.deepcopy(self.data.get(key, {}).get(field, {})) return True, ret +def conn_side_effect(arg, _1, _2, namespace): + return db_conns[namespace][arg] -db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "APPL_STATE_DB": APPL_STATE_DB } -def conn_side_effect(arg, _): - return db_conns[arg] - +def init_db_conns(namespaces): + for ns in namespaces: + db_conns[ns] = { + "APPL_DB": {"namespace": ns, "name": APPL_DB}, + "ASIC_DB": {"namespace": ns, "name": ASIC_DB}, + "APPL_STATE_DB": {"namespace": ns, "name": APPL_STATE_DB}, + "CONFIG_DB": ConfigDB(ns) + } def table_side_effect(db, tbl): - if not db in tables_returned: - tables_returned[db] = {} - if not tbl in tables_returned[db]: - tables_returned[db][tbl] = Table(db, tbl) - return tables_returned[db][tbl] + if not tbl in db.keys(): + db[tbl] = Table(db, tbl) + return db[tbl] -class mock_selector: +class MockSelector: TIMEOUT = 1 EMULATE_HANG = False @@ -111,21 +101,19 @@ def __init__(self): self.select_state = 0 self.select_cnt = 0 self.subs = None - # print("Mock Selector constructed") - + logger.debug("Mock Selector constructed") def addSelectable(self, subs): self.subs = subs return 0 - def select(self, timeout): # Toggle between good & timeout # state = self.select_state self.subs.update() - if mock_selector.EMULATE_HANG: + if MockSelector.EMULATE_HANG: time.sleep(60) if self.select_state == 0: @@ -136,29 
+124,15 @@ def select(self, timeout): return (state, None) -class mock_db_conn: - def __init__(self, db): - self.db_name = None - for (k, v) in db_conns.items(): - if v == db: - self.db_name = k - assert self.db_name != None - - def getDbName(self): - return self.db_name - - -class mock_subscriber: +class MockSubscriber: def __init__(self, db, tbl): self.state = PRE self.db = db self.tbl = tbl - self.dbconn = mock_db_conn(db) self.mock_tbl = table_side_effect(self.db, self.tbl) self.set_keys = list(self.mock_tbl.data.keys()) self.del_keys = [] - def update(self): if self.state == PRE: s_keys, d_keys = self.mock_tbl.update() @@ -166,7 +140,6 @@ def update(self): self.del_keys += d_keys self.state = UPD - def pop(self): v = None if self.set_keys: @@ -180,61 +153,53 @@ def pop(self): k = "" op = "" - print("state={} k={} op={} v={}".format(self.state, k, op, str(v))) return (k, op, v) - - - def getDbConnector(self): - return self.dbconn - - - def getTableName(self): - return self.tbl - def subscriber_side_effect(db, tbl): global subscribers_returned - - key = "db_{}_tbl_{}".format(db, tbl) + key = "db_{}_{}_tbl_{}".format(db["namespace"], db["name"], tbl) if not key in subscribers_returned: - subscribers_returned[key] = mock_subscriber(db, tbl) + subscribers_returned[key] = MockSubscriber(db, tbl) return subscribers_returned[key] - def select_side_effect(): global selector_returned if not selector_returned: - selector_returned = mock_selector() + selector_returned = MockSelector() return selector_returned +def config_db_side_effect(namespace): + return db_conns[namespace]["CONFIG_DB"] -def table_side_effect(db, tbl): - if not db in tables_returned: - tables_returned[db] = {} - if not tbl in tables_returned[db]: - tables_returned[db][tbl] = Table(db, tbl) - return tables_returned[db][tbl] - +class ConfigDB: + def __init__(self, namespace): + self.namespace = namespace + self.name = CONFIG_DB + self.db = current_test_data.get(PRE, {}).get(namespace, {}).get(CONFIG_DB, 
DEFAULT_CONFIG_DB) if current_test_data is not None else DEFAULT_CONFIG_DB -def config_db_side_effect(table): - if CONFIG_DB not in current_test_data[PRE]: - return DEFAULT_CONFIG_DB[table] - if not CONFIG_DB in tables_returned: - tables_returned[CONFIG_DB] = {} - if not table in tables_returned[CONFIG_DB]: - tables_returned[CONFIG_DB][table] = current_test_data[PRE][CONFIG_DB].get(table, {}) - return tables_returned[CONFIG_DB][table] + def get_table(self, table): + return self.db.get(table, {}) + def get_entry(self, table, key): + return self.get_table(table).get(key, {}) def set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db): mock_conn.side_effect = conn_side_effect mock_table.side_effect = table_side_effect mock_sel.side_effect = select_side_effect mock_subs.side_effect = subscriber_side_effect - mock_config_db.get_table = MagicMock(side_effect=config_db_side_effect) + mock_config_db.side_effect = config_db_side_effect class TestRouteCheck(object): + @staticmethod + def extract_namespace_from_args(args): + # args: ['show', 'ip', 'route', '-n', 'asic0', 'json'], + for i, arg in enumerate(args): + if arg == "-n" and i + 1 < len(args): + return args[i + 1] + return DEFAULTNS + def setup(self): pass @@ -246,21 +211,20 @@ def init(self): def force_hang(self): old_timeout = route_check.TIMEOUT_SECONDS route_check.TIMEOUT_SECONDS = 5 - mock_selector.EMULATE_HANG = True + MockSelector.EMULATE_HANG = True yield route_check.TIMEOUT_SECONDS = old_timeout - mock_selector.EMULATE_HANG = False + MockSelector.EMULATE_HANG = False @pytest.fixture def mock_dbs(self): - mock_config_db = MagicMock() with patch("route_check.swsscommon.DBConnector") as mock_conn, \ patch("route_check.swsscommon.Table") as mock_table, \ patch("route_check.swsscommon.Select") as mock_sel, \ patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \ - patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \ + 
patch("sonic_py_common.multi_asic.connect_config_db_for_ns") as mock_config_db, \ patch("route_check.swsscommon.NotificationProducer"): device_info.get_platform = MagicMock(return_value='unittest') set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db) @@ -268,32 +232,40 @@ def mock_dbs(self): @pytest.mark.parametrize("test_num", TEST_DATA.keys()) def test_route_check(self, mock_dbs, test_num): + logger.debug("test_route_check: test_num={}".format(test_num)) self.init() ret = 0 - ct_data = TEST_DATA[test_num] set_test_case_data(ct_data) - logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR])) + self.run_test(ct_data) + def run_test(self, ct_data): with patch('sys.argv', ct_data[ARGS].split()), \ - patch('route_check.subprocess.check_output') as mock_check_output: + patch('sonic_py_common.multi_asic.get_namespace_list', return_value= ct_data[NAMESPACE]), \ + patch('sonic_py_common.multi_asic.is_multi_asic', return_value= ct_data[MULTI_ASIC]), \ + patch('route_check.subprocess.check_output', side_effect=lambda *args, **kwargs: self.mock_check_output(ct_data, *args, **kwargs)), \ + patch('route_check.mitigate_installed_not_offloaded_frr_routes', side_effect=lambda *args, **kwargs: None), \ + patch('route_check.load_db_config', side_effect=lambda: init_db_conns(ct_data[NAMESPACE])): + + ret, res = route_check.main() + self.assert_results(ct_data, ret, res) - routes = ct_data.get(FRR_ROUTES, {}) + def mock_check_output(self, ct_data, *args, **kwargs): + ns = self.extract_namespace_from_args(args[0]) + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) - def side_effect(*args, **kwargs): - return json.dumps(routes) + def assert_results(self, ct_data, ret, res): + expect_ret = ct_data.get(RET, 0) + expect_res = ct_data.get(RESULT, None) - mock_check_output.side_effect = side_effect + if res: + logger.debug("res={}".format(json.dumps(res, indent=4))) + if expect_res: + 
logger.debug("expect_res={}".format(json.dumps(expect_res, indent=4))) - ret, res = route_check.main() - expect_ret = ct_data[RET] if RET in ct_data else 0 - expect_res = ct_data[RESULT] if RESULT in ct_data else None - if res: - print("res={}".format(json.dumps(res, indent=4))) - if expect_res: - print("expect_res={}".format(json.dumps(expect_res, indent=4))) - assert ret == expect_ret - assert res == expect_res + assert ret == expect_ret + assert res == expect_res def test_timeout(self, mock_dbs, force_hang): # Test timeout @@ -324,9 +296,11 @@ def test_logging(self): assert len(msg) == 5 def test_mitigate_routes(self, mock_dbs): + namespace = DEFAULTNS missed_frr_rt = [ { 'prefix': '192.168.0.1', 'protocol': 'bgp' } ] rt_appl = [ '192.168.0.1' ] + init_db_conns([namespace]) with patch('sys.stdout', new_callable=StringIO) as mock_stdout: - route_check.mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl) + route_check.mitigate_installed_not_offloaded_frr_routes(namespace, missed_frr_rt, rt_appl) # Verify that the stdout are suppressed in this function assert not mock_stdout.getvalue() diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 9250c54ca90..50c6276f262 100644 --- a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -1,4 +1,6 @@ DESCR = "Description" +MULTI_ASIC = "multi_asic" +NAMESPACE = "namespace-list" ARGS = "args" RET = "return" APPL_DB = 0 @@ -9,6 +11,9 @@ UPD = "update" FRR_ROUTES = "frr-routes" RESULT = "res" +DEFAULTNS="" +ASIC0 = "asic0" +ASIC1 = "asic1" OP_SET = "SET" OP_DEL = "DEL" @@ -32,66 +37,76 @@ TEST_DATA = { "0": { DESCR: "basic good one", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } + DEFAULTNS: { + APPL_DB: { + 
ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "1": { DESCR: "With updates", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m DEBUG -i 1", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {} + } }, - INTF_TABLE: { - 
"PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, UPD: { - ASIC_DB: { - RT_ENTRY_TABLE: { - OP_SET: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - }, - OP_DEL: { - RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + ASIC_DB: { + RT_ENTRY_TABLE: { + OP_SET: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + }, + OP_DEL: { + RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + } } } } @@ -99,506 +114,845 @@ }, "2": { DESCR: "basic failure one", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -i 15", RET: -1, PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:90.10.196.24/31": {}, + 
"PortChannel1023:9603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:90.10.196.24/31": {}, - "PortChannel1023:9603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "3603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "3603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.12/31", - "10.10.196.20/31" - ], - "missed_INTF_TABLE_entries": [ - "90.10.196.24/32", - "9603:10b0:503:df4::5d/128" - ], - "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ - "20.10.196.12/31", - "20.10.196.20/31", - "20.10.196.24/32", - "3603:10b0:503:df4::5d/128" - ] + DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31", + "10.10.196.20/31" + ], + "missed_INTF_TABLE_entries": [ + "90.10.196.24/32", + "9603:10b0:503:df4::5d/128" + ], + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "20.10.196.12/31", + "20.10.196.20/31", + "20.10.196.24/32", + "3603:10b0:503:df4::5d/128" + ] + } } }, "3": { DESCR: "basic good one with no args", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } + DEFAULTNS: { + 
APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "4": { DESCR: "Good one with routes on voq inband interface", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" }, - "10.10.197.1" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}, - "2603:10b0:503:df5::1" : { "ifname": "Ethernet-IB0", "nexthop": "::"}, - "100.0.0.2/32" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0" }, - "2064:100::2/128" : { "ifname": "Ethernet-IB0", "nexthop": "::" }, - "101.0.0.0/24" : { "ifname": "Ethernet-IB0", "nexthop": "100.0.0.2"} + DEFAULTNS: { 
+ APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" }, + "10.10.197.1" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}, + "2603:10b0:503:df5::1" : { "ifname": "Ethernet-IB0", "nexthop": "::"}, + "100.0.0.2/32" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0" }, + "2064:100::2/128" : { "ifname": "Ethernet-IB0", "nexthop": "::" }, + "101.0.0.0/24" : { "ifname": "Ethernet-IB0", "nexthop": "100.0.0.2"} + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {}, + "Ethernet-IB0:10.10.197.1/24": {}, + "Ethernet-IB0:2603:10b0:503:df5::1/64": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {}, - "Ethernet-IB0:10.10.197.1/24": {}, - "Ethernet-IB0:2603:10b0:503:df5::1/64": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.197.1/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df5::1/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "101.0.0.0/24" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: 
{}, + RT_ENTRY_KEY_PREFIX + "10.10.197.1/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df5::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "101.0.0.0/24" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "5": { DESCR: "local route with nexthop - fail", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", RET: -1, PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo", "nexthop": "100.0.0.2" } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo", "nexthop": "100.0.0.2" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.30/31" - ] 
+ DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.30/31" + ] + } } }, "6": { DESCR: "Good one with VNET routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - VNET_ROUTE_TABLE: { - "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, - "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, - "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {}, + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {}, - "Vlan3001": { "vnet_name": "Vnet1" }, - "Vlan3001:30.1.10.1/24": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "30.1.10.1/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: 
{}, - RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "30.1.10.1/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "7": { DESCR: "dualtor standalone tunnel route case", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - CONFIG_DB: { - DEVICE_METADATA: { - LOCALHOST: {"subtype": "DualToR"} - } - }, - APPL_DB: { - NEIGH_TABLE: { - "Vlan1000:fc02:1000::99": { "neigh": "00:00:00:00:00:00", "family": "IPv6"} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "fc02:1000::99/128" + RT_ENTRY_KEY_SUFFIX: {}, + DEFAULTNS: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: {"subtype": "DualToR"} + } + }, + APPL_DB: { + NEIGH_TABLE: { + "Vlan1000:fc02:1000::99": { "neigh": "00:00:00:00:00:00", "family": "IPv6"} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "fc02:1000::99/128" + RT_ENTRY_KEY_SUFFIX: {}, + } } } } }, "8": { DESCR: "Good one with VRF routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "Vrf1:0.0.0.0/0" : { "ifname": "portchannel0" }, - "Vrf1:10.10.196.12/31" : { "ifname": "portchannel0" }, - "Vrf1:10.10.196.20/31" : { "ifname": "portchannel0" } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "Vrf1:0.0.0.0/0" : { "ifname": "portchannel0" }, + "Vrf1:10.10.196.12/31" : { "ifname": "portchannel0" }, + "Vrf1:10.10.196.20/31" : { "ifname": "portchannel0" } + }, + 
INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "9": { DESCR: "SOC IPs on Libra ToRs should be ignored", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - CONFIG_DB: { - DEVICE_METADATA: { - LOCALHOST: {"subtype": "DualToR"} - }, - MUX_CABLE: { - "Ethernet4": { - "cable_type": "active-active", - "server_ipv4": "192.168.0.2/32", - "server_ipv6": "fc02:1000::2/128", - "soc_ipv4": "192.168.0.3/32", - "soc_ipv6": "fc02:1000::3/128", - "state": "auto" + DEFAULTNS: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: {"subtype": "DualToR"} }, - } - }, - APPL_DB: { - ROUTE_TABLE: { - "192.168.0.2/32": {"ifname": "tun0"}, - "fc02:1000::2/128": {"ifname": "tun0"} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "192.168.0.2/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "fc02:1000::2/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "192.168.0.3/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "fc02:1000::3/128" + RT_ENTRY_KEY_SUFFIX: 
{} + MUX_CABLE: { + "Ethernet4": { + "cable_type": "active-active", + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::2/128", + "soc_ipv4": "192.168.0.3/32", + "soc_ipv6": "fc02:1000::3/128", + "state": "auto" + }, + } + }, + APPL_DB: { + ROUTE_TABLE: { + "192.168.0.2/32": {"ifname": "tun0"}, + "fc02:1000::2/128": {"ifname": "tun0"} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "192.168.0.2/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "fc02:1000::2/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "192.168.0.3/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "fc02:1000::3/128" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "10": { DESCR: "basic good one, check FRR routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: 
{} - } }, }, FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - "selected": True, - }, - ], + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + } }, }, "11": { DESCR: "failure test case, missing FRR routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, 
- RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} - } }, }, FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - }, - ], - "1.1.1.0/24": [ - { - "prefix": "1.1.1.0/24", - "vrfName": "default", - "protocol": "static", - "selected": True, - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - "selected": True, - }, - ], + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + }, + ], + "1.1.1.0/24": [ + { + "prefix": "1.1.1.0/24", + "vrfName": "default", + "protocol": "static", + "selected": True, + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + }, }, RESULT: { - "missed_FRR_routes": [ - {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True}, - {"prefix": "1.1.1.0/24", "vrfName": "default", "protocol": "static", "selected": True}, - ], + DEFAULTNS: { + "missed_FRR_routes": [ + {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True}, + {"prefix": "1.1.1.0/24", "vrfName": "default", "protocol": "static", "selected": True}, + ], + }, }, RET: -1, }, "12": { DESCR: "skip bgp routes offloaded check, if not selected as best", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": 
"portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} - } }, }, FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - "selected": True, - }, - ], + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + }, }, }, "13": { DESCR: "basic good one with IPv6 address", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + }, + INTF_TABLE: { + "PortChannel1013:2000:31:0:0::1/64": {}, + } }, - 
INTF_TABLE: { - "PortChannel1013:2000:31:0:0::1/64": {}, - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + } } } } }, "14": { DESCR: "dualtor ignore vlan neighbor route miss case", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -i 15", RET: -1, PRE: { - CONFIG_DB: { - DEVICE_METADATA: { - LOCALHOST: {"subtype": "DualToR"} + DEFAULTNS: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: {"subtype": "DualToR"} + } + }, + APPL_DB: { + ROUTE_TABLE: { + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "192.168.0.101/32": { "ifname": "tun0" }, + "192.168.0.103/32": { "ifname": "tun0" }, + }, + INTF_TABLE: { + "PortChannel1013:90.10.196.24/31": {}, + "PortChannel1023:9603:10b0:503:df4::5d/126": {}, + }, + NEIGH_TABLE: { + "Vlan1000:192.168.0.100": {}, + "Vlan1000:192.168.0.101": {}, + "Vlan1000:192.168.0.102": {}, + "Vlan1000:192.168.0.103": {}, + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "192.168.0.101/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "192.168.0.102/32" + RT_ENTRY_KEY_SUFFIX: {}, + } } - }, - APPL_DB: { - ROUTE_TABLE: { - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "192.168.0.101/32": { "ifname": "tun0" }, - "192.168.0.103/32": { "ifname": "tun0" }, + } + }, + RESULT: { + DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31", + "10.10.196.20/31" + ], + "missed_INTF_TABLE_entries": [ + "90.10.196.24/32", + "9603:10b0:503:df4::5d/128" + ], + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "20.10.196.12/31", + "20.10.196.20/31", + 
"20.10.196.24/32", + ] + } + } + }, + "15": { + DESCR: "basic good one on multi-asic on a particular asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic0 -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - INTF_TABLE: { - "PortChannel1013:90.10.196.24/31": {}, - "PortChannel1023:9603:10b0:503:df4::5d/126": {}, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + } + } + }, + "16": { + DESCR: "basic good one on multi-asic on all asics", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } }, - NEIGH_TABLE: { - "Vlan1000:192.168.0.100": {}, - "Vlan1000:192.168.0.101": {}, - "Vlan1000:192.168.0.102": {}, - "Vlan1000:192.168.0.103": {}, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + 
"10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "192.168.0.101/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "192.168.0.102/32" + RT_ENTRY_KEY_SUFFIX: {}, + ASIC1: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + } + }, + "17": { + DESCR: "simple failure case on multi-asic on a particular asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic0 -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + 
RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.12/31", - "10.10.196.20/31" - ], - "missed_INTF_TABLE_entries": [ - "90.10.196.24/32", - "9603:10b0:503:df4::5d/128" - ], - "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ - "20.10.196.12/31", - "20.10.196.20/31", - "20.10.196.24/32", - ] - } + ASIC0: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31" + ], + } + }, + RET: -1, + }, + "18": { + DESCR: "simple failure case on multi-asic on all asics", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + ASIC1: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: 
{}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + }, + RESULT: { + ASIC0: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31" + ], + }, + ASIC1: { + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "10.10.196.12/31" + ], + }, + }, + RET: -1, }, + "19": { + DESCR: "validate namespace input on multi-asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n random -m INFO -i 1000", + RET: -1, + }, + "20": { + DESCR: "validate namespace input on single-asic", + MULTI_ASIC: False, + NAMESPACE: [''], + ARGS: "route_check -n random -m INFO -i 1000", + RET: -1, + }, + "21": { + DESCR: "multi-asic failure test case, missing FRR routes", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic1 -m INFO -i 1000", + PRE: { + ASIC1: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + }, + FRR_ROUTES: { + ASIC1: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + }, + }, + RESULT: { + ASIC1: { + 
"missed_FRR_routes": [ + {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True} + ], + }, + }, + RET: -1, + }, + } From 04a33e1f62338220cfe954418e500f1a2e42cd69 Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Mon, 25 Mar 2024 12:12:32 -0700 Subject: [PATCH 22/45] Add "state" field in CONFIG_DB a toggle of the fabric port monitor feature (#2932) * Add "state" field in CONFIG_DB fabric_monitor table as a toggle of the fabric port monitoring feature. The command to set this is "config fabric port monitor state " --------- Signed-off-by: Jie Feng --- config/fabric.py | 33 +++++++++++++++ doc/Command-Reference.md | 15 +++++++ tests/config_fabric_test.py | 58 ++++++++++++++++++++++++++ tests/mock_tables/asic0/config_db.json | 7 ++++ tests/mock_tables/asic1/config_db.json | 7 ++++ 5 files changed, 120 insertions(+) diff --git a/config/fabric.py b/config/fabric.py index a3870589ae3..16ce35f7330 100644 --- a/config/fabric.py +++ b/config/fabric.py @@ -157,6 +157,39 @@ def error_threshold(crccells, rxcells, namespace): config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", {'monErrThreshCrcCells': crccells, 'monErrThreshRxCells': rxcells}) +def setFabricPortMonitorState(state, namespace, ctx): + """ set the fabric port monitor state""" + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Make sure configuration data exists + monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA") + if not bool(monitorData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", + {'monState': state}) + +# +# 'config fabric port montior state ' +# +@monitor.command() +@click.argument('state', metavar='', required=True) +@multi_asic_util.multi_asic_click_option_namespace +def state(state, namespace): 
+ """FABRIC PORT MONITOR STATE configuration tasks""" + ctx = click.get_current_context() + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ns_list = multi_asic.get_namespace_list() + for namespace in ns_list: + setFabricPortMonitorState(state, namespace, ctx) + else: + setFabricPortMonitorState(state, namespace, ctx) + # # 'config fabric port monitor poll ...' # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index aecab082b91..a38d27e3c47 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3846,6 +3846,21 @@ This command sets the number of consecutive polls in which no error is detected admin@sonic:~$ config fabric port monitor poll threshold recovery 5 -n asic0 ``` +**config fabric port monitor state ** + +This command sets the monitor state in CONFIG_DB to enable/disable the fabric monitor feature. + +- Usage: + ``` + config fabric port monitor state [OPTIONS] + ``` + +- Example: + ``` + admin@sonic:~$ config fabric port monitor state enable + admin@sonic:~$ config fabric port monitor state disable + ``` + ## Feature SONiC includes a capability in which Feature state can be enabled/disabled diff --git a/tests/config_fabric_test.py b/tests/config_fabric_test.py index 1f56ea416ac..ca8a8b8a097 100644 --- a/tests/config_fabric_test.py +++ b/tests/config_fabric_test.py @@ -4,6 +4,7 @@ import os import pytest import sys +import importlib from click.testing import CliRunner from utilities_common.db import Db @@ -93,3 +94,60 @@ def test_config_fabric_monitor_threshold(self, ctx): result = self.basic_check("port", ["monitor", "poll", "threshold", "recovery", "8"], ctx) expect_result = 0 assert operator.eq(result.exit_code, expect_result) + + def test_config_fabric_monitor_state(self, ctx): + # Issue command "config fabric port monitor state " + result = self.basic_check("port", ["monitor", "state", "enable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + result = 
self.basic_check("port", ["monitor", "state", "disable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + +class TestMultiAsicConfigFabric(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def basic_check(self, command_name, para_list, ctx): + # This function issues command of "config fabric xxxx", + # and returns the result of the command. + runner = CliRunner() + result = runner.invoke(config.config.commands["fabric"].commands[command_name], para_list, obj = ctx) + print(result.output) + return result + + def test_multi_config_fabric_monitor_state(self, ctx): + result = self.basic_check("port", ["monitor", "state", "disable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + @classmethod + def teardown_class(cls): + print("TEARDOWN_TEST") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index ffee9478f34..4ecc654c326 100644 --- a/tests/mock_tables/asic0/config_db.json +++ 
b/tests/mock_tables/asic0/config_db.json @@ -288,5 +288,12 @@ "ports@": "Ethernet124", "type": "L3", "stage": "ingress" + }, + "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monCapacityThreshWarn": "100", + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshIsolation": "1", + "monPollThreshRecovery": "8" } } diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 1cded681491..95cf040544d 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -227,5 +227,12 @@ "holdtime": "10", "asn": "65200", "keepalive": "3" + }, + "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monCapacityThreshWarn": "100", + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshIsolation": "1", + "monPollThreshRecovery": "8" } } From e35452b777323a0c2f0aa623fbcd90ef7860784a Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Mon, 25 Mar 2024 14:19:28 -0700 Subject: [PATCH 23/45] Modify "show interface transceiver status" CLI to show SW cmis state (#3238) Signed-off-by: Mihir Patel --- tests/mock_tables/state_db.json | 1 + tests/sfp_test.py | 1 + utilities_common/sfp_helper.py | 1 + 3 files changed, 3 insertions(+) diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index b266b5e8345..b622705be1e 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -374,6 +374,7 @@ "rx_sig_power_max": "40" }, "TRANSCEIVER_STATUS|Ethernet44":{ + "cmis_state": "READY", "DP1State": "DataPathActivated", "DP2State": "DataPathActivated", "DP3State": "DataPathActivated", diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 37a025a35c9..22eda401e57 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -304,6 +304,7 @@ test_qsfp_dd_status_output = """\ Ethernet44: + CMIS State (SW): READY Tx fault flag on media lane 1: False Tx fault flag on media lane 2: False Tx fault flag on 
media lane 3: False diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py index 09a96ca2ea1..4a6ad65b573 100644 --- a/utilities_common/sfp_helper.py +++ b/utilities_common/sfp_helper.py @@ -47,6 +47,7 @@ # For non-CMIS, only first 1 or 4 lanes are applicable. # For CMIS, all 8 lanes are applicable. QSFP_STATUS_MAP = { + 'cmis_state': 'CMIS State (SW)', 'txfault1': 'Tx fault flag on media lane 1', 'txfault2': 'Tx fault flag on media lane 2', 'txfault3': 'Tx fault flag on media lane 3', From bdc57206cdff258a63907bb43c5845fe9ac57386 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Thu, 28 Mar 2024 09:35:07 +0800 Subject: [PATCH 24/45] Revert "Fix for Switch Port Modes and VLAN CLI Enhancement (#3108)" (#3246) Reverts sonic-net/sonic-utilities#3108 --- config/main.py | 40 +- config/switchport.py | 137 ---- config/vlan.py | 352 ++++------ doc/Command-Reference.md | 153 ----- scripts/db_migrator.py | 38 +- show/interfaces/__init__.py | 67 -- .../config_db/port-an-expected.json | 3 - .../config_db/portchannel-expected.json | 5 - .../config_db/switchport-expected.json | 144 ---- .../config_db/switchport-input.json | 138 ---- tests/db_migrator_test.py | 28 - tests/interfaces_test.py | 100 --- tests/ipv6_link_local_test.py | 2 +- tests/mock_tables/asic0/config_db.json | 1 - tests/mock_tables/config_db.json | 35 +- tests/vlan_test.py | 630 +----------------- utilities_common/cli.py | 156 +---- 17 files changed, 141 insertions(+), 1888 deletions(-) delete mode 100644 config/switchport.py delete mode 100644 tests/db_migrator_input/config_db/switchport-expected.json delete mode 100644 tests/db_migrator_input/config_db/switchport-input.json diff --git a/config/main.py b/config/main.py index 80cd094dbd4..a068a1b7f4d 100644 --- a/config/main.py +++ b/config/main.py @@ -57,7 +57,6 @@ from .config_mgmt import ConfigMgmtDPB, ConfigMgmt from . import mclag from . import syslog -from . import switchport from . 
import dns # mock masic APIs for unit test @@ -106,7 +105,6 @@ PORT_SPEED = "speed" PORT_TPID = "tpid" DEFAULT_TPID = "0x8100" -PORT_MODE= "switchport_mode" DOM_CONFIG_SUPPORTED_SUBPORTS = ['0', '1'] @@ -1215,9 +1213,6 @@ def config(ctx): # DNS module config.add_command(dns.dns) -# Switchport module -config.add_command(switchport.switchport) - @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @@ -4591,40 +4586,19 @@ def add(ctx, interface_name, ip_addr, gw): if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port + vlan_member_table = config_db.get_table('VLAN_MEMBER') + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') if interface_is_in_portchannel(portchannel_member_table, interface_name): ctx.fail("{} is configured as a member of portchannel." 
.format(interface_name)) - - - # Add a validation to check this interface is in routed mode before - # assigning an IP address to it - - sub_intf = False - if clicommon.is_valid_port(config_db, interface_name): - is_port = True - elif clicommon.is_valid_portchannel(config_db, interface_name): - is_port = False - else: - sub_intf = True - - if not sub_intf: - interface_mode = "routed" - if is_port: - interface_data = config_db.get_entry('PORT',interface_name) - elif not is_port: - interface_data = config_db.get_entry('PORTCHANNEL',interface_name) - - if "mode" in interface_data: - interface_mode = interface_data["mode"] - - if interface_mode != "routed": - ctx.fail("Interface {} is not in routed mode!".format(interface_name)) - return - - try: ip_address = ipaddress.ip_interface(ip_addr) except ValueError as err: diff --git a/config/switchport.py b/config/switchport.py deleted file mode 100644 index a714f9427ff..00000000000 --- a/config/switchport.py +++ /dev/null @@ -1,137 +0,0 @@ -import click -from .utils import log -import utilities_common.cli as clicommon - -# -# 'switchport' mode ('config switchport ...') -# - - -@click.group(cls=clicommon.AbbreviationGroup, name='switchport') -def switchport(): - """Switchport mode configuration tasks""" - pass - - -@switchport.command("mode") -@click.argument("type", metavar="", required=True, type=click.Choice(["access", "trunk", "routed"])) -@click.argument("port", metavar="port", required=True) -@clicommon.pass_db -def switchport_mode(db, type, port): - """switchport mode help commands.Mode_type can be access or trunk or routed""" - - ctx = click.get_current_context() - - log.log_info("'switchport mode {} {}' executing...".format(type, port)) - mode_exists_status = True - - # checking if port name with alias exists - if clicommon.get_interface_naming_mode() == "alias": - alias = port - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - port = iface_alias_converter.alias_to_name(port) - if port is None: - 
ctx.fail("cannot find port name for alias {}".format(alias)) - - if clicommon.is_port_mirror_dst_port(db.cfgdb, port): - ctx.fail("{} is configured as mirror destination port".format(port)) - - - if clicommon.is_valid_port(db.cfgdb, port): - is_port = True - elif clicommon.is_valid_portchannel(db.cfgdb, port): - is_port = False - else: - ctx.fail("{} does not exist".format(port)) - - portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') - - if (is_port and clicommon.interface_is_in_portchannel(portchannel_member_table, port)): - ctx.fail("{} is part of portchannel!".format(port)) - - if is_port: - port_data = db.cfgdb.get_entry('PORT',port) - else: - port_data = db.cfgdb.get_entry('PORTCHANNEL',port) - - # mode type is either access or trunk - if type != "routed": - - if "mode" in port_data: - existing_mode = port_data["mode"] - else: - existing_mode = "routed" - mode_exists_status = False - if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ - (not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): - ctx.fail("Remove IP from {} to change mode!".format(port)) - - if existing_mode == "routed": - if mode_exists_status: - # if the port in an interface - if is_port: - db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) - # if not port then is a port channel - elif not is_port: - db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) - - if not mode_exists_status: - port_data["mode"] = type - if is_port: - db.cfgdb.set_entry("PORT", port, port_data) - # if not port then is a port channel - elif not is_port: - db.cfgdb.set_entry("PORTCHANNEL", port, port_data) - - if existing_mode == type: - ctx.fail("{} is already in the {} mode".format(port,type)) - else: - if existing_mode == "access" and type == "trunk": - pass - if existing_mode == "trunk" and type == "access": - if clicommon.interface_is_tagged_member(db.cfgdb,port): - ctx.fail("{} is in {} mode and have tagged member(s).\nRemove tagged 
member(s) from {} to switch to {} mode".format(port,existing_mode,port,type)) - if is_port: - db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) - # if not port then is a port channel - elif not is_port: - db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) - - click.echo("{} switched from {} to {} mode".format(port, existing_mode, type)) - - # if mode type is routed - else: - - if clicommon.interface_is_tagged_member(db.cfgdb,port): - ctx.fail("{} has tagged member(s). \nRemove them to change mode to {}".format(port,type)) - - if clicommon.interface_is_untagged_member(db.cfgdb,port): - ctx.fail("{} has untagged member. \nRemove it to change mode to {}".format(port,type)) - - if "mode" in port_data: - existing_mode = port_data["mode"] - else: - existing_mode = "routed" - mode_exists_status = False - - if not mode_exists_status: - port_data["mode"] = type - if is_port: - db.cfgdb.set_entry("PORT", port, port_data) - - # if not port then is a port channel - elif not is_port: - db.cfgdb.set_entry("PORTCHANNEL", port, port_data) - pass - - elif mode_exists_status and existing_mode == type: - ctx.fail("{} is already in {} mode".format(port,type)) - - else: - if is_port: - db.cfgdb.mod_entry("PORT", port, {"mode": "{}".format(type)}) - # if not port then is a port channel - elif not is_port: - db.cfgdb.mod_entry("PORTCHANNEL", port, {"mode": "{}".format(type)}) - - click.echo("{} switched from {} to {} mode".format(port,existing_mode,type)) diff --git a/config/vlan.py b/config/vlan.py index 121a854c32d..7ace1d6d5f3 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -32,53 +32,28 @@ def is_dhcp_relay_running(): @vlan.command('add') -@click.argument('vid', metavar='', required=True) -@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") +@click.argument('vid', metavar='', required=True, type=int) @clicommon.pass_db -def add_vlan(db, vid, multiple): +def add_vlan(db, vid): """Add VLAN""" 
ctx = click.get_current_context() + vlan = 'Vlan{}'.format(vid) config_db = ValidatedConfigDBConnector(db.cfgdb) - - vid_list = [] - # parser will parse the vid input if there are syntax errors it will throw error - if multiple: - vid_list = clicommon.multiple_vlan_parser(ctx, vid) - else: - if not vid.isdigit(): - ctx.fail("{} is not integer".format(vid)) - vid_list.append(int(vid)) - if ADHOC_VALIDATION: + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) - # loop will execute till an exception occurs - for vid in vid_list: - - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) - - #Multiple VLANs need to be referenced - vlan = 'Vlan{}'.format(vid) + if vid == 1: + ctx.fail("{} is default VLAN".format(vlan)) # TODO: MISSING CONSTRAINT IN YANG MODEL - # default vlan checker - if vid == 1: - # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is default VLAN.".format(vlan)) - - log.log_info("'vlan add {}' executing...".format(vid)) - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): - log.log_info("{} already exists".format(vlan)) - ctx.fail("{} already exists, Aborting!!!".format(vlan)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): - ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) - - # set dhcpv4_relay table - set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} already exists".format(vlan)) + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): + ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) def is_dhcpv6_relay_config_exist(db, vlan_name): @@ -99,74 +74,57 @@ def delete_db_entry(entry_name, db_connector, db_name): 
@vlan.command('del') -@click.argument('vid', metavar='', required=True) -@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") +@click.argument('vid', metavar='', required=True, type=int) @click.option('--no_restart_dhcp_relay', is_flag=True, type=click.BOOL, required=False, default=False, help="If no_restart_dhcp_relay is True, do not restart dhcp_relay while del vlan and \ require dhcpv6 relay of this is empty") @clicommon.pass_db -def del_vlan(db, vid, multiple, no_restart_dhcp_relay): +def del_vlan(db, vid, no_restart_dhcp_relay): """Delete VLAN""" + log.log_info("'vlan del {}' executing...".format(vid)) + ctx = click.get_current_context() + vlan = 'Vlan{}'.format(vid) + if no_restart_dhcp_relay: + if is_dhcpv6_relay_config_exist(db, vlan): + ctx.fail("Can't delete {} because related DHCPv6 Relay config is exist".format(vlan)) - vid_list = [] - # parser will parse the vid input if there are syntax errors it will throw error - if multiple: - vid_list = clicommon.multiple_vlan_parser(ctx, vid) - else: - if not vid.isdigit(): - ctx.fail("{} is not integer".format(vid)) - vid_list.append(int(vid)) - config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: - for vid in vid_list: - log.log_info("'vlan del {}' executing...".format(vid)) - - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) - - #Multiple VLANs needs to be referenced - vlan = 'Vlan{}'.format(vid) - - #Multiple VLANs needs to be checked - if no_restart_dhcp_relay: - if is_dhcpv6_relay_config_exist(db, vlan): - ctx.fail("Can't delete {} because related DHCPv6 Relay config is exist".format(vlan)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - log.log_info("{} does not exist".format(vlan)) - ctx.fail("{} does not exist, Aborting!!!".format(vlan)) - - intf_table = db.cfgdb.get_table('VLAN_INTERFACE') - for intf_key in intf_table: - if ((type(intf_key) is str and intf_key 
== 'Vlan{}'.format(vid)) or # TODO: MISSING CONSTRAINT IN YANG MODEL - (type(intf_key) is tuple and intf_key[0] == 'Vlan{}'.format(vid))): - ctx.fail("{} can not be removed. First remove IP addresses assigned to this VLAN".format(vlan)) - - keys = [(k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid)] - - if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("VLAN ID {} can not be removed. First remove all members assigned to this VLAN.".format(vid)) - - vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') - for vxmap_key, vxmap_data in vxlan_table.items(): - if vxmap_data['vlan'] == 'Vlan{}'.format(vid): - ctx.fail("vlan: {} can not be removed. First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key))) - - # set dhcpv4_relay table - set_dhcp_relay_table('VLAN', config_db, vlan, None) + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + ctx.fail("{} does not exist".format(vlan)) + + intf_table = db.cfgdb.get_table('VLAN_INTERFACE') + for intf_key in intf_table: + if ((type(intf_key) is str and intf_key == 'Vlan{}'.format(vid)) or # TODO: MISSING CONSTRAINT IN YANG MODEL + (type(intf_key) is tuple and intf_key[0] == 'Vlan{}'.format(vid))): + ctx.fail("{} can not be removed. 
First remove IP addresses assigned to this VLAN".format(vlan)) - if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): - # set dhcpv6_relay table - set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) - # We need to restart dhcp_relay service after dhcpv6_relay config change - if is_dhcp_relay_running(): - dhcp_relay_util.handle_restart_dhcp_relay_service() + keys = [ (k, v) for k, v in db.cfgdb.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid) ] - delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) - delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + if keys: # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("VLAN ID {} can not be removed. First remove all members assigned to this VLAN.".format(vid)) + + vxlan_table = db.cfgdb.get_table('VXLAN_TUNNEL_MAP') + for vxmap_key, vxmap_data in vxlan_table.items(): + if vxmap_data['vlan'] == 'Vlan{}'.format(vid): + ctx.fail("vlan: {} can not be removed. First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key)) ) + + # set dhcpv4_relay table + set_dhcp_relay_table('VLAN', config_db, vlan, None) + + if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): + # set dhcpv6_relay table + set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) + # We need to restart dhcp_relay service after dhcpv6_relay config change + if is_dhcp_relay_running(): + dhcp_relay_util.handle_restart_dhcp_relay_service() + delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) vlans = db.cfgdb.get_keys('VLAN') if not vlans: @@ -231,159 +189,97 @@ def vlan_member(): pass @vlan_member.command('add') -@click.argument('vid', metavar='', required=True) +@click.argument('vid', metavar='', required=True, type=int) @click.argument('port', metavar='port', required=True) -@click.option('-u', '--untagged', is_flag=True, help="Untagged 
status") -@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") -@click.option('-e', '--except_flag', is_flag=True, help="Skips the given vlans and adds all other existing vlans") +@click.option('-u', '--untagged', is_flag=True) @clicommon.pass_db -def add_vlan_member(db, vid, port, untagged, multiple, except_flag): +def add_vlan_member(db, vid, port, untagged): """Add VLAN member""" ctx = click.get_current_context() - # parser will parse the vid input if there are syntax errors it will throw error + log.log_info("'vlan member add {} {}' executing...".format(vid, port)) - vid_list = clicommon.vlan_member_input_parser(ctx, "add", db, except_flag, multiple, vid, port) - - # multiple vlan command cannot be used to add multiple untagged vlan members - if untagged and (multiple or except_flag or vid == "all"): - ctx.fail("{} cannot have more than one untagged Vlan.".format(port)) + vlan = 'Vlan{}'.format(vid) config_db = ValidatedConfigDBConnector(db.cfgdb) - if ADHOC_VALIDATION: - for vid in vid_list: - - vlan = 'Vlan{}'.format(vid) - - # default vlan checker - if vid == 1: - ctx.fail("{} is default VLAN".format(vlan)) - - log.log_info("'vlan member add {} {}' executing...".format(vid, port)) - - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - log.log_info("{} does not exist".format(vlan)) - ctx.fail("{} does not exist".format(vlan)) - - if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL - alias = port - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - port = iface_alias_converter.alias_to_name(alias) - if port is None: - ctx.fail("cannot find port name for alias {}".format(alias)) - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if clicommon.is_port_mirror_dst_port(db.cfgdb, port): - ctx.fail("{} is configured as mirror destination 
port".format(port)) - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if clicommon.is_port_vlan_member(db.cfgdb, port, vlan): - log.log_info("{} is already a member of {}, Aborting!!!".format(port, vlan)) - ctx.fail("{} is already a member of {}, Aborting!!!".format(port, vlan)) - - - if clicommon.is_valid_port(db.cfgdb, port): - is_port = True - elif clicommon.is_valid_portchannel(db.cfgdb, port): - is_port = False - else: - ctx.fail("{} does not exist".format(port)) - - if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ - (not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) - - portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if (is_port and clicommon.interface_is_in_portchannel(portchannel_member_table, port)): - ctx.fail("{} is part of portchannel!".format(port)) - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if (clicommon.interface_is_untagged_member(db.cfgdb, port) and untagged): - ctx.fail("{} is already untagged member!".format(port)) - - # checking mode status of port if its access, trunk or routed - if is_port: - port_data = config_db.get_entry('PORT',port) - - # if not port then is a port channel - elif not is_port: - port_data = config_db.get_entry('PORTCHANNEL',port) - - if "mode" not in port_data: - ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) - else: - existing_mode = port_data["mode"] - - if existing_mode == "routed": - ctx.fail("{} is in routed mode!\nUse switchport mode command to change port mode".format(port)) - - mode_type = "access" if untagged else "trunk" - if existing_mode == "access" and mode_type == "trunk": # TODO: MISSING CONSTRAINT IN YANG MODEL - ctx.fail("{} is in access mode! 
Tagged Members cannot be added".format(port)) - - elif existing_mode == mode_type or (existing_mode == "trunk" and mode_type == "access"): - pass - - # in case of exception in list last added member will be shown to user + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + ctx.fail("{} does not exist".format(vlan)) + + if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL + alias = port + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + port = iface_alias_converter.alias_to_name(alias) + if port is None: + ctx.fail("cannot find port name for alias {}".format(alias)) + + if clicommon.is_port_mirror_dst_port(db.cfgdb, port): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is configured as mirror destination port".format(port)) + + if clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is already a member of {}".format(port, vlan)) + + if clicommon.is_valid_port(db.cfgdb, port): + is_port = True + elif clicommon.is_valid_portchannel(db.cfgdb, port): + is_port = False + else: + ctx.fail("{} does not exist".format(port)) + + if (is_port and clicommon.is_port_router_interface(db.cfgdb, port)) or \ + (not is_port and clicommon.is_pc_router_interface(db.cfgdb, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is a router interface!".format(port)) - try: - config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged" }) - except ValueError: - ctx.fail("{} invalid or does not exist, or {} invalid or does not exist".format(vlan, port)) + portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') + + if (is_port and clicommon.interface_is_in_portchannel(portchannel_member_table, port)): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is part of portchannel!".format(port)) + + if 
(clicommon.interface_is_untagged_member(db.cfgdb, port) and untagged): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is already untagged member!".format(port)) + + try: + config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged" }) + except ValueError: + ctx.fail("{} invalid or does not exist, or {} invalid or does not exist".format(vlan, port)) @vlan_member.command('del') -@click.argument('vid', metavar='', required=True) +@click.argument('vid', metavar='', required=True, type=int) @click.argument('port', metavar='', required=True) -@click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") -@click.option('-e', '--except_flag', is_flag=True, help="Skips the given vlans and adds all other existing vlans") @clicommon.pass_db -def del_vlan_member(db, vid, port, multiple, except_flag): +def del_vlan_member(db, vid, port): """Delete VLAN member""" ctx = click.get_current_context() + log.log_info("'vlan member del {} {}' executing...".format(vid, port)) + vlan = 'Vlan{}'.format(vid) - # parser will parse the vid input if there are syntax errors it will throw error - - vid_list = clicommon.vlan_member_input_parser(ctx,"del", db, except_flag, multiple, vid, port) - config_db = ValidatedConfigDBConnector(db.cfgdb) if ADHOC_VALIDATION: - for vid in vid_list: - - log.log_info("'vlan member del {} {}' executing...".format(vid, port)) - - if not clicommon.is_vlanid_in_range(vid): - ctx.fail("Invalid VLAN ID {} (2-4094)".format(vid)) - - vlan = 'Vlan{}'.format(vid) - - if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: - log.log_info("{} does not exist".format(vlan)) - ctx.fail("{} does not exist, Aborting!!!".format(vlan)) - - if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL - alias = port - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - port = iface_alias_converter.alias_to_name(alias) - if port is 
None: - ctx.fail("cannot find port name for alias {}".format(alias)) - - # TODO: MISSING CONSTRAINT IN YANG MODEL - if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): - ctx.fail("{} is not a member of {}".format(port, vlan)) - - - try: - config_db.set_entry('VLAN_MEMBER', (vlan, port), None) - delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) - delete_db_entry("DHCP_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) - except JsonPatchConflict: - ctx.fail("{} invalid or does not exist, or {} is not a member of {}".format(vlan, port, vlan)) + if not clicommon.is_vlanid_in_range(vid): + ctx.fail("Invalid VLAN ID {} (1-4094)".format(vid)) + + if clicommon.check_if_vlanid_exist(db.cfgdb, vlan) == False: + ctx.fail("{} does not exist".format(vlan)) + + if clicommon.get_interface_naming_mode() == "alias": # TODO: MISSING CONSTRAINT IN YANG MODEL + alias = port + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + port = iface_alias_converter.alias_to_name(alias) + if port is None: + ctx.fail("cannot find port name for alias {}".format(alias)) + + if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL + ctx.fail("{} is not a member of {}".format(port, vlan)) + + try: + config_db.set_entry('VLAN_MEMBER', (vlan, port), None) + delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) + delete_db_entry("DHCP_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) + except JsonPatchConflict: + ctx.fail("{} invalid or does not exist, or {} is not a member of {}".format(vlan, port, vlan)) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index a38d27e3c47..0709b6d4f1d 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -162,8 +162,6 @@ * [Subinterfaces](#subinterfaces) * [Subinterfaces Show Commands](#subinterfaces-show-commands) * [Subinterfaces Config Commands](#subinterfaces-config-commands) -* [Switchport 
Modes](#switchport-modes) - * [Switchport Modes Config Commands](#switchportmodes-config-commands) * [Syslog](#syslog) * [Syslog show commands](#syslog-show-commands) * [Syslog config commands](#syslog-config-commands) @@ -4488,7 +4486,6 @@ Subsequent pages explain each of these commands in detail. neighbor Show neighbor related information portchannel Show PortChannel information status Show Interface status information - switchport Show Interface switchport information tpid Show Interface tpid information transceiver Show SFP Transceiver information ``` @@ -4954,53 +4951,6 @@ This command displays some more fields such as Lanes, Speed, MTU, Type, Asymmetr Ethernet180 105,106,107,108 100G 9100 hundredGigE46 down down N/A N/A ``` - -**show interface switchport status** - -This command displays switchport modes status of the interfaces - -- Usage: - ``` - show interfaces switchport status - ``` - -- Example (show interface switchport status of all interfaces): - ``` - admin@sonic:~$ show interfaces switchport status - Interface Mode - ----------- -------- - Ethernet0 access - Ethernet4 trunk - Ethernet8 routed - - ``` - -**show interface switchport config** - -This command displays switchport modes configuration of the interfaces - -- Usage: - ``` - show interfaces switchport config - ``` - -- Example (show interface switchport config of all interfaces): - ``` - admin@sonic:~$ show interfaces switchport config - Interface Mode Untagged Tagged - ----------- -------- -------- ------- - Ethernet0 access 2 - Ethernet4 trunk 3 4,5,6 - Ethernet8 routed - - ``` - - -For details please refer [Switchport Mode HLD](https://github.com/sonic-net/SONiC/pull/912/files#diff-03597c34684d527192f76a6e975792fcfc83f54e20dde63f159399232d148397) to know more about this command. - - - - **show interfaces transceiver** This command is already explained [here](#Transceivers) @@ -10153,41 +10103,6 @@ This sub-section explains how to configure subinterfaces. 
Go Back To [Beginning of the document](#) or [Beginning of this section](#subinterfaces) - - -## Switchport Modes - -### Switchport Modes Config Commands - -This subsection explains how to configure switchport modes on a Port/PortChannel. - -**config switchport mode ** - -Usage: - ``` - config switchport mode - ``` - -- Example (Config switchport mode access on "Ethernet0): - ``` - admin@sonic:~$ sudo config switchport mode access Ethernet0 - ``` - -- Example (Config switchport mode trunk on "Ethernet4"): - ``` - admin@sonic:~$ sudo config switchport mode trunk Ethernet4 - ``` - -- Example (Config switchport mode routed on "Ethernet12"): - ``` - admin@sonic:~$ sudo config switchport mode routed Ethernet12 - ``` - - - -Go Back To [Beginning of the document](#) or [Beginning of this section](#switchport-modes) - - ## Syslog ### Syslog Show Commands @@ -10900,31 +10815,6 @@ This command is used to add or delete the vlan. admin@sonic:~$ sudo config vlan add 100 ``` - -**config vlan add/del -m** - -This command is used to add or delete multiple vlans via single command. - -- Usage: - ``` - config vlan (add | del) -m - ``` - -- Example01 (Create the VLAN "Vlan100, Vlan101, Vlan102, Vlan103" if these does not already exist) - - ``` - admin@sonic:~$ sudo config vlan add -m 100-103 - ``` - - -- Example02 (Create the VLAN "Vlan105, Vlan106, Vlan107, Vlan108" if these does not already exist): - - ``` - admin@sonic:~$ sudo config vlan add -m 105,106,107,108 - ``` - - - **config vlan member add/del** This command is to add or delete a member port into the already created vlan. @@ -10946,49 +10836,6 @@ This command is to add or delete a member port into the already created vlan. This command will add Ethernet4 as member of the vlan 100. ``` - -**config vlan member add/del -m -e** - -This command is to add or delete a member port into multiple already created vlans. 
- -- Usage: - ``` - config vlan member add/del [-m] [-e] - ``` - -*NOTE: -m flag multiple Vlans in range or comma separted list can be added as a member port.* - - -*NOTE: -e is used as an except flag as explained with examples below.* - - -- Example: - ``` - admin@sonic:~$ sudo config vlan member add -m 100-103 Ethernet0 - This command will add Ethernet0 as member of the vlan 100, vlan 101, vlan 102, vlan 103 - ``` - - ``` - admin@sonic:~$ sudo config vlan member add -m 100,101,102 Ethernet4 - This command will add Ethernet4 as member of the vlan 100, vlan 101, vlan 102 - ``` - - ``` - admin@sonic:~$ sudo config vlan member add -e -m 104,105 Ethernet8 - Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are exisiting vlans. This command will add Ethernet8 as member of vlan 100, vlan 101, vlan 102, vlan 103 - ``` - - ``` - admin@sonic:~$ sudo config vlan member add -e 100 Ethernet12 - Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are exisiting vlans. This command will add Ethernet12 as member of vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 - ``` - - ``` - admin@sonic:~$ sudo config vlan member add all Ethernet20 - Suppose vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 are exisiting vlans. 
This command will add Ethernet20 as member of vlan 100, vlan 101, vlan 102, vlan 103, vlan 104, vlan 105 - ``` - - **config proxy_arp enabled/disabled** This command is used to enable or disable proxy ARP for a VLAN interface diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index fab66624148..c4d4e2da9c9 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -509,39 +509,6 @@ def migrate_config_db_port_table_for_auto_neg(self): self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'adv_speeds', value['speed']) elif value['autoneg'] == '0': self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'off') - - - def migrate_config_db_switchport_mode(self): - port_table = self.configDB.get_table('PORT') - portchannel_table = self.configDB.get_table('PORTCHANNEL') - vlan_member_table = self.configDB.get_table('VLAN_MEMBER') - - vlan_member_keys= [] - for _,key in vlan_member_table: - vlan_member_keys.append(key) - - for p_key, p_value in port_table.items(): - if 'mode' in p_value: - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format("PORT", p_key), 'mode', p_value['mode']) - else: - if p_key in vlan_member_keys: - p_value["mode"] = "trunk" - self.configDB.set_entry("PORT", p_key, p_value) - else: - p_value["mode"] = "routed" - self.configDB.set_entry("PORT", p_key, p_value) - - for pc_key, pc_value in portchannel_table.items(): - if 'mode' in pc_value: - self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format("PORTCHANNEL", pc_key), 'mode', pc_value['mode']) - else: - if pc_key in vlan_member_keys: - pc_value["mode"] = "trunk" - self.configDB.set_entry("PORTCHANNEL", pc_key, pc_value) - else: - pc_value["mode"] = "routed" - self.configDB.set_entry("PORTCHANNEL", pc_key, pc_value) - def migrate_qos_db_fieldval_reference_remove(self, table_list, db, db_num, db_delimeter): for pair in table_list: @@ -1014,7 +981,6 @@ def version_3_0_0(self): """ log.log_info('Handling version_3_0_0') 
self.migrate_config_db_port_table_for_auto_neg() - self.migrate_config_db_switchport_mode() self.set_version('version_3_0_1') return 'version_3_0_1' @@ -1030,9 +996,7 @@ def version_3_0_1(self): for name, data in portchannel_table.items(): data['lacp_key'] = 'auto' self.configDB.set_entry('PORTCHANNEL', name, data) - self.migrate_config_db_switchport_mode() self.set_version('version_3_0_2') - return 'version_3_0_2' def version_3_0_2(self): @@ -1168,7 +1132,7 @@ def version_4_0_3(self): Version 4_0_3. """ log.log_info('Handling version_4_0_3') - + self.set_version('version_202305_01') return 'version_202305_01' diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 497950b80ed..a5a3734664f 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -797,70 +797,3 @@ def fec_status(interfacename, namespace, display, verbose): cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) - - -# -# switchport group (show interfaces switchport ...) 
-# -@interfaces.group(name='switchport', cls=clicommon.AliasedGroup) -def switchport(): - """Show interface switchport information""" - pass - - -@switchport.command(name="config") -@clicommon.pass_db -def switchport_mode_config(db): - """Show interface switchport config information""" - - port_data = list(db.cfgdb.get_table('PORT').keys()) - portchannel_data = list(db.cfgdb.get_table('PORTCHANNEL').keys()) - - portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') - - for interface in port_data: - if clicommon.interface_is_in_portchannel(portchannel_member_table,interface): - port_data.remove(interface) - - - keys = port_data + portchannel_data - - def tablelize(keys): - table = [] - - for key in natsorted(keys): - r = [clicommon.get_interface_name_for_display(db, key), clicommon.get_interface_switchport_mode(db,key), clicommon.get_interface_untagged_vlan_members(db,key), clicommon.get_interface_tagged_vlan_members(db,key)] - table.append(r) - - return table - - header = ['Interface', 'Mode', 'Untagged', 'Tagged'] - click.echo(tabulate(tablelize(keys), header, tablefmt="simple", stralign='left')) - -@switchport.command(name="status") -@clicommon.pass_db -def switchport_mode_status(db): - """Show interface switchport status information""" - - port_data = list(db.cfgdb.get_table('PORT').keys()) - portchannel_data = list(db.cfgdb.get_table('PORTCHANNEL').keys()) - - portchannel_member_table = db.cfgdb.get_table('PORTCHANNEL_MEMBER') - - for interface in port_data: - if clicommon.interface_is_in_portchannel(portchannel_member_table,interface): - port_data.remove(interface) - - keys = port_data + portchannel_data - - def tablelize(keys): - table = [] - - for key in natsorted(keys): - r = [clicommon.get_interface_name_for_display(db, key), clicommon.get_interface_switchport_mode(db,key)] - table.append(r) - - return table - - header = ['Interface', 'Mode'] - click.echo(tabulate(tablelize(keys), header,tablefmt="simple", stralign='left')) diff --git 
a/tests/db_migrator_input/config_db/port-an-expected.json b/tests/db_migrator_input/config_db/port-an-expected.json index 14bdc415f4f..1ef2cf49163 100644 --- a/tests/db_migrator_input/config_db/port-an-expected.json +++ b/tests/db_migrator_input/config_db/port-an-expected.json @@ -5,7 +5,6 @@ "description": "etp1a", "mtu": "9100", "alias": "etp1a", - "mode": "routed", "pfc_asym": "off", "speed": "10000", "fec": "none", @@ -19,7 +18,6 @@ "admin_status": "up", "mtu": "9100", "alias": "etp1b", - "mode": "routed", "pfc_asym": "off", "speed": "25000", "fec": "none", @@ -32,7 +30,6 @@ "admin_status": "up", "mtu": "9100", "alias": "etp2a", - "mode": "routed", "pfc_asym": "off", "speed": "50000", "fec": "none" diff --git a/tests/db_migrator_input/config_db/portchannel-expected.json b/tests/db_migrator_input/config_db/portchannel-expected.json index 874212b2f75..2644e5f4e9d 100644 --- a/tests/db_migrator_input/config_db/portchannel-expected.json +++ b/tests/db_migrator_input/config_db/portchannel-expected.json @@ -3,7 +3,6 @@ "admin_status": "up", "members@": "Ethernet0,Ethernet4", "min_links": "2", - "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -11,7 +10,6 @@ "admin_status": "up", "members@": "Ethernet8,Ethernet12", "min_links": "2", - "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -19,7 +17,6 @@ "admin_status": "up", "members@": "Ethernet16", "min_links": "1", - "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, @@ -27,14 +24,12 @@ "admin_status": "up", "members@": "Ethernet20,Ethernet24", "min_links": "2", - "mode": "routed", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel9999": { "admin_status": "up", "mtu": "9100", - "mode": "routed", "lacp_key": "auto" }, "VERSIONS|DATABASE": { diff --git a/tests/db_migrator_input/config_db/switchport-expected.json b/tests/db_migrator_input/config_db/switchport-expected.json deleted file mode 100644 index 812abbd58fd..00000000000 --- a/tests/db_migrator_input/config_db/switchport-expected.json 
+++ /dev/null @@ -1,144 +0,0 @@ -{ - "PORT|Ethernet0": { - "admin_status": "up", - "alias": "fortyGigE0/0", - "index": "0", - "lanes": "25,26,27,28", - "mode": "trunk", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "index": "1", - "lanes": "29,30,31,32", - "mode": "routed", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "index": "2", - "lanes": "33,34,35,36", - "mode": "trunk", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet12": { - "admin_status": "up", - "alias": "fortyGigE0/12", - "index": "3", - "lanes": "37,38,39,40", - "mode": "access", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet16": { - "admin_status": "up", - "alias": "fortyGigE0/16", - "index": "4", - "lanes": "45,46,47,48", - "mode": "routed", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet20": { - "admin_status": "up", - "alias": "fortyGigE0/20", - "index": "5", - "lanes": "41,42,43,44", - "mode": "trunk", - "mtu": "9100", - "speed": "40000" - }, - - "VLAN|Vlan2": { - "vlanid": "2" - }, - "VLAN|Vlan3": { - "vlanid": "3" - }, - "VLAN|Vlan4": { - "vlanid": "4" - }, - "VLAN|Vlan5": { - "vlanid": "5" - }, - "VLAN|Vlan6": { - "vlanid": "6" - }, - "VLAN|Vlan7": { - "vlanid": "7" - }, - - - "VLAN_MEMBER|Vlan2|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan3|Ethernet8": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan4|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan6|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan6|Ethernet8": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan7|Ethernet8": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan5|Ethernet8": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan3|PortChannel0003": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan8|PortChannel0002": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan9|PortChannel0002": { - 
"tagging_mode": "tagged" - }, - - "PORTCHANNEL|PortChannel0001": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "access", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0002": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "trunk", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0003": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "trunk", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0004": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "routed", - "mtu": "9100" - }, - - "VERSIONS|DATABASE": { - "VERSION": "version_3_0_1" - } -} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/switchport-input.json b/tests/db_migrator_input/config_db/switchport-input.json deleted file mode 100644 index c1ad306ce4d..00000000000 --- a/tests/db_migrator_input/config_db/switchport-input.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "PORT|Ethernet0": { - "admin_status": "up", - "alias": "fortyGigE0/0", - "index": "0", - "lanes": "25,26,27,28", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "index": "1", - "lanes": "29,30,31,32", - "mode": "routed", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet12": { - "admin_status": "up", - "alias": "fortyGigE0/12", - "index": "3", - "lanes": "37,38,39,40", - "mode": "access", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet16": { - "admin_status": "up", - "alias": "fortyGigE0/16", - "index": "4", - "lanes": "45,46,47,48", - "mtu": "9100", - "speed": "40000" - }, - "PORT|Ethernet20": { - "admin_status": "up", - "alias": "fortyGigE0/20", - "index": "5", - "lanes": 
"41,42,43,44", - "mode": "trunk", - "mtu": "9100", - "speed": "40000" - }, - "VLAN|Vlan2": { - "vlanid": "2" - }, - "VLAN|Vlan3": { - "vlanid": "3" - }, - "VLAN|Vlan4": { - "vlanid": "4" - }, - "VLAN|Vlan5": { - "vlanid": "5" - }, - "VLAN|Vlan6": { - "vlanid": "6" - }, - "VLAN|Vlan7": { - "vlanid": "7" - }, - - "VLAN_MEMBER|Vlan2|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan3|Ethernet8": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan4|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan6|Ethernet0": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan6|Ethernet8": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan7|Ethernet8": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan5|Ethernet8": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan3|PortChannel0003": { - "tagging_mode": "untagged" - }, - "VLAN_MEMBER|Vlan8|PortChannel0002": { - "tagging_mode": "tagged" - }, - "VLAN_MEMBER|Vlan9|PortChannel0002": { - "tagging_mode": "tagged" - }, - - - "PORTCHANNEL|PortChannel0001": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "access", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0002": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mode": "trunk", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0003": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mtu": "9100" - }, - "PORTCHANNEL|PortChannel0004": { - "admin_status": "up", - "fast_rate": "false", - "lacp_key": "auto", - "min_links": "1", - "mtu": "9100" - }, - - "VERSIONS|DATABASE": { - "VERSION": "version_3_0_0" - } -} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 470f613d377..21ca9148df6 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -309,33 +309,6 @@ def test_port_autoneg_migrator(self): assert dbmgtr.configDB.get_table('PORT') == 
expected_db.cfgdb.get_table('PORT') assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') - - -class TestSwitchPortMigrator(object): - @classmethod - def setup_class(cls): - os.environ['UTILITIES_UNIT_TESTING'] = "2" - - @classmethod - def teardown_class(cls): - os.environ['UTILITIES_UNIT_TESTING'] = "0" - dbconnector.dedicated_dbs['CONFIG_DB'] = None - - def test_switchport_mode_migrator(self): - dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'switchport-input') - import db_migrator - dbmgtr = db_migrator.DBMigrator(None) - dbmgtr.migrate() - - dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'switchport-expected') - expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_3_0_1') - - assert dbmgtr.configDB.get_table('PORT') == expected_db.cfgdb.get_table('PORT') - assert dbmgtr.configDB.get_table('PORTCHANNEL') == expected_db.cfgdb.get_table('PORTCHANNEL') - assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') - - class TestInitConfigMigrator(object): @classmethod def setup_class(cls): @@ -901,7 +874,6 @@ def test_golden_config_hostname(self): # hostname is from minigraph.xml assert hostname == 'SONiC-Dummy' - class TestMain(object): @classmethod def setup_class(cls): diff --git a/tests/interfaces_test.py b/tests/interfaces_test.py index 717dcbb4c9b..c3246ba0261 100644 --- a/tests/interfaces_test.py +++ b/tests/interfaces_test.py @@ -144,86 +144,6 @@ 1001 PortChannel1001 N/A """ - -show_interfaces_switchport_status_output="""\ -Interface Mode ---------------- ------ -Ethernet0 routed -Ethernet4 trunk -Ethernet8 routed -Ethernet12 routed -Ethernet16 trunk -Ethernet20 routed -Ethernet24 trunk -Ethernet28 trunk -Ethernet36 routed -Ethernet40 routed -Ethernet44 routed -Ethernet48 routed -Ethernet52 routed -Ethernet56 routed -Ethernet60 routed -Ethernet64 routed -Ethernet68 routed 
-Ethernet72 routed -Ethernet76 routed -Ethernet80 routed -Ethernet84 routed -Ethernet88 routed -Ethernet92 routed -Ethernet96 routed -Ethernet100 routed -Ethernet104 routed -Ethernet108 routed -Ethernet116 routed -Ethernet124 routed -PortChannel0001 routed -PortChannel0002 routed -PortChannel0003 routed -PortChannel0004 routed -PortChannel1001 trunk -""" - -show_interfaces_switchport_config_output = """\ -Interface Mode Untagged Tagged ---------------- ------ ---------- -------- -Ethernet0 routed -Ethernet4 trunk 1000 -Ethernet8 routed 1000 -Ethernet12 routed 1000 -Ethernet16 trunk 1000 -Ethernet20 routed -Ethernet24 trunk 2000 -Ethernet28 trunk 2000 -Ethernet36 routed -Ethernet40 routed -Ethernet44 routed -Ethernet48 routed -Ethernet52 routed -Ethernet56 routed -Ethernet60 routed -Ethernet64 routed -Ethernet68 routed -Ethernet72 routed -Ethernet76 routed -Ethernet80 routed -Ethernet84 routed -Ethernet88 routed -Ethernet92 routed -Ethernet96 routed -Ethernet100 routed -Ethernet104 routed -Ethernet108 routed -Ethernet116 routed -Ethernet124 routed -PortChannel0001 routed -PortChannel0002 routed -PortChannel0003 routed -PortChannel0004 routed -PortChannel1001 trunk 4000 -""" - - class TestInterfaces(object): @classmethod def setup_class(cls): @@ -417,26 +337,6 @@ def test_parse_interface_in_filter(self): assert len(intf_list) == 3 assert intf_list == ["Ethernet-BP10", "Ethernet-BP11", "Ethernet-BP12"] - - def test_show_interfaces_switchport_status(self): - runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["switchport"].commands["status"]) - print(result.exit_code) - print(result.output) - - assert result.exit_code == 0 - assert result.output == show_interfaces_switchport_status_output - - def test_show_interfaces_switchport_config(self): - runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["switchport"].commands["config"]) - print(result.exit_code) - print(result.output) - - assert 
result.exit_code == 0 - assert result.output == show_interfaces_switchport_config_output - - @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/ipv6_link_local_test.py b/tests/ipv6_link_local_test.py index bb9e53ac1ab..50b691be6b1 100644 --- a/tests/ipv6_link_local_test.py +++ b/tests/ipv6_link_local_test.py @@ -232,7 +232,7 @@ def test_vlan_member_add_on_link_local_interface(self): result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["4000", "Ethernet40"], obj=obj) print(result.output) assert result.exit_code != 0 - assert 'Error: Ethernet40 is in routed mode!\nUse switchport mode command to change port mode' in result.output + assert 'Error: Ethernet40 is a router interface!' in result.output @classmethod def teardown_class(cls): diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 4ecc654c326..3117115489b 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -75,7 +75,6 @@ "admin_status": "up", "members@": "Ethernet0,Ethernet4", "min_links": "2", - "mode": "trunk", "mtu": "9100" }, "PORTCHANNEL|PortChannel4001": { diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 0ef506c2887..2a81f96bfac 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -30,7 +30,6 @@ "lanes": "25,26,27,28", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -42,7 +41,6 @@ "lanes": "29,30,31,32", "mtu": "9100", "tpid": "0x8100", - "mode": "trunk", "pfc_asym": "off", "speed": "40000" }, @@ -54,7 +52,6 @@ "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -66,7 +63,6 @@ "lanes": "37,38,39,40", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -78,7 +74,6 @@ "lanes": "16", "mtu": "9100", "tpid": "0x8100", - "mode": 
"trunk", "pfc_asym": "off", "speed": "100" }, @@ -90,7 +85,6 @@ "lanes": "41,42,43,44", "mtu": "9100", "tpid": "0x9200", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -102,7 +96,6 @@ "lanes": "1,2,3,4", "mtu": "9100", "tpid": "0x8100", - "mode": "trunk", "pfc_asym": "off", "speed": "1000" }, @@ -114,7 +107,6 @@ "lanes": "5,6,7,8", "mtu": "9100", "tpid": "0x8100", - "mode": "trunk", "pfc_asym": "off", "speed": "1000" }, @@ -126,7 +118,6 @@ "lanes": "13,14,15,16", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -138,7 +129,6 @@ "lanes": "9,10,11,12", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "10" }, @@ -150,7 +140,6 @@ "lanes": "17,18,19,20", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -162,7 +151,6 @@ "lanes": "21,22,23,24", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -174,7 +162,6 @@ "lanes": "53,54,55,56", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -186,7 +173,6 @@ "lanes": "49,50,51,52", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -198,7 +184,6 @@ "lanes": "57,58,59,60", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -210,7 +195,6 @@ "lanes": "61,62,63,64", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -222,7 +206,6 @@ "lanes": "69,70,71,72", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -234,7 +217,6 @@ "lanes": "65,66,67,68", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -246,7 +228,6 @@ "lanes": "73,74,75,76", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -258,7 +239,6 @@ "lanes": "77,78,79,80", "mtu": "9100", "tpid": 
"0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -270,7 +250,6 @@ "lanes": "109,110,111,112", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -282,7 +261,6 @@ "lanes": "105,106,107,108", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -294,7 +272,6 @@ "lanes": "113,114,115,116", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -306,7 +283,6 @@ "lanes": "117,118,119,120", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -318,7 +294,6 @@ "lanes": "125,126,127,128", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -329,7 +304,6 @@ "lanes": "121,122,123,124", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -340,7 +314,6 @@ "lanes": "81,82,83,84", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -351,7 +324,6 @@ "lanes": "85,86,87,88", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -363,7 +335,6 @@ "lanes": "93,94,95,96", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -375,7 +346,6 @@ "lanes": "89,90,91,92", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -387,7 +357,6 @@ "lanes": "101,102,103,104", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000" }, @@ -399,7 +368,6 @@ "lanes": "97,98,99,100", "mtu": "9100", "tpid": "0x8100", - "mode": "routed", "pfc_asym": "off", "speed": "40000", "fec" : "auto" @@ -703,8 +671,7 @@ "members@": "Ethernet32", "min_links": "1", "tpid": "0x8100", - "mtu": "9100", - "mode": "trunk" + "mtu": "9100" }, "PORTCHANNEL|PortChannel0001": { "admin_status": "up", diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 
4d7ed0e947c..436e309281d 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -134,124 +134,6 @@ +-----------+-----------------+-----------------+----------------+-------------+ """ - -test_config_add_del_multiple_vlan_and_vlan_member_output="""\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | Ethernet16 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1002 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1003 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | | | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | PortChannel1001 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - -test_config_add_del_add_vlans_and_add_all_vlan_member_output="""\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | 
Ethernet16 | untagged | | -| | | Ethernet20 | tagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1002 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1003 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet20 | tagged | enabled | -| | fc02:1011::1/64 | Ethernet24 | untagged | | -| | | Ethernet28 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | Ethernet20 | tagged | disabled | -| | | PortChannel1001 | tagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - -test_config_add_del_add_vlans_and_add_vlans_member_except_vlan_output = """\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | Ethernet16 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1002 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet20 | tagged | enabled | -| | fc02:1011::1/64 | Ethernet24 | untagged | | -| | | Ethernet28 | untagged | | 
-+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | PortChannel1001 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - -test_config_add_del_add_vlans_and_add_vlans_member_except_vlan__after_del_member_output = """\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | Ethernet16 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1002 | | Ethernet20 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | | | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | PortChannel1001 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - -test_config_add_del_vlan_and_vlan_member_with_switchport_modes_output = """\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | 
fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | Ethernet16 | untagged | | -| | | Ethernet20 | tagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | Ethernet20 | untagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | | | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | PortChannel1001 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - config_add_del_vlan_and_vlan_member_in_alias_mode_output="""\ +-----------+-----------------+-----------------+----------------+-------------+ | VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | @@ -273,28 +155,6 @@ """ - -test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types_output = """\ -+-----------+-----------------+-----------------+----------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | -+===========+=================+=================+================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | | -| | | Ethernet12 | untagged | | -| | | Ethernet16 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 1001 | | | | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | | -+-----------+-----------------+-----------------+----------------+-------------+ -| 3000 | | | | disabled | 
-+-----------+-----------------+-----------------+----------------+-------------+ -| 4000 | | PortChannel1001 | tagged | disabled | -+-----------+-----------------+-----------------+----------------+-------------+ -""" - - class TestVlan(object): _old_run_bgp_command = None @classmethod @@ -376,7 +236,7 @@ def test_config_vlan_add_vlan_with_invalid_vlanid(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output def test_config_vlan_add_vlan_with_exist_vlanid(self): runner = CliRunner() @@ -392,7 +252,7 @@ def test_config_vlan_del_vlan_with_invalid_vlanid(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output def test_config_vlan_del_vlan_with_nonexist_vlanid(self): runner = CliRunner() @@ -402,80 +262,13 @@ def test_config_vlan_del_vlan_with_nonexist_vlanid(self): assert result.exit_code != 0 assert "Error: Vlan1001 does not exist" in result.output - - def test_config_vlan_add_vlan_with_multiple_vlanids(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["10,20,30,40", "--multiple"]) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - def test_config_vlan_add_vlan_with_multiple_vlanids_with_range(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["10-20", "--multiple"]) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - def test_config_vlan_add_vlan_with_multiple_vlanids_with_range_and_multiple_ids(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - result = 
runner.invoke(config.config.commands["vlan"].commands["add"], ["10-15,20,25,30", "--multiple"]) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - def test_config_vlan_add_vlan_with_wrong_range(self): - runner = CliRunner() - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["15-10", "--multiple"]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "15 is greater than 10. List cannot be generated" in result.output - - def test_config_vlan_add_vlan_range_with_default_vlan(self): - runner = CliRunner() - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1-10", "--multiple"]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "Vlan1 is default vlan" in result.output - - def test_config_vlan_add_vlan_is_digit_fail(self): - runner = CliRunner() - vid = "test_fail_case" - result = runner.invoke(config.config.commands["vlan"].commands["add"], [vid]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "{} is not integer".format(vid) in result.output - - def test_config_vlan_add_vlan_is_default_vlan(self): - runner = CliRunner() - default_vid = "1" - vlan = "Vlan{}".format(default_vid) - result = runner.invoke(config.config.commands["vlan"].commands["add"], [default_vid]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "{} is default VLAN.".format(vlan) in result.output - - def test_config_vlan_del_vlan_does_not_exist(self): - runner = CliRunner() - vid = "3010" - vlan = "Vlan{}".format(vid) - result = runner.invoke(config.config.commands["vlan"].commands["del"], [vid]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "{} does not exist".format(vlan) in result.output - def test_config_vlan_add_member_with_invalid_vlanid(self): runner = CliRunner() result = 
runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["4096", "Ethernet4"]) print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: Invalid VLAN ID 4096 (2-4094)" in result.output + assert "Error: Invalid VLAN ID 4096 (1-4094)" in result.output def test_config_vlan_add_member_with_nonexist_vlanid(self): runner = CliRunner() @@ -501,16 +294,8 @@ def test_config_vlan_add_nonexist_port_member(self): assert result.exit_code != 0 assert "Error: Ethernet3 does not exist" in result.output - def test_config_vlan_add_nonexist_portchannel_member(self): runner = CliRunner() - #switch port mode for PortChannel1011 to trunk mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "PortChannel1011"]) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "Error: PortChannel1011 does not exist" in result.output - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], \ ["1000", "PortChannel1011"]) print(result.exit_code) @@ -518,7 +303,6 @@ def test_config_vlan_add_nonexist_portchannel_member(self): assert result.exit_code != 0 assert "Error: PortChannel1011 does not exist" in result.output - def test_config_vlan_add_portchannel_member(self): runner = CliRunner() db = Db() @@ -529,7 +313,6 @@ def test_config_vlan_add_portchannel_member(self): print(result.output) assert result.exit_code == 0 - # show output result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) print(result.exit_code) @@ -546,7 +329,7 @@ def test_config_vlan_add_rif_portchannel_member(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: PortChannel0001 is in routed mode!\nUse switchport mode command to change port mode" in result.output + assert "Error: PortChannel0001 is a router interface!" 
in result.output def test_config_vlan_with_vxlanmap_del_vlan(self, mock_restart_dhcp_relay_service): runner = CliRunner() @@ -674,22 +457,6 @@ def test_config_add_del_vlan_and_vlan_member(self, mock_restart_dhcp_relay_servi print(result.output) assert result.exit_code == 0 - # add Ethernet20 to vlan 1001 but Ethernet20 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "Ethernet20", "--untagged"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure Ethernet20 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from routed to access mode" in result.output - # add Ethernet20 to vlan 1001 result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["1001", "Ethernet20", "--untagged"], obj=db) @@ -735,22 +502,6 @@ def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dh print(result.output) assert result.exit_code == 0 - # add etp6 to vlan 1001 but etp6 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "etp6", "--untagged"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure etp6 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "etp6"], obj=db) - print(result.exit_code) - print(result.output) 
- assert result.exit_code == 0 - assert "Ethernet20 switched from routed to access mode" in result.output - # add etp6 to vlan 1001 result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["1001", "etp6", "--untagged"], obj=db) @@ -787,375 +538,6 @@ def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self, mock_restart_dh os.environ['SONIC_CLI_IFACE_MODE'] = "default" - - def test_config_add_del_multiple_vlan_and_vlan_member(self,mock_restart_dhcp_relay_service): - runner = CliRunner() - db = Db() - - # add vlan 1001,1002,1003 - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002,1003","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001,1002,1003", "Ethernet20", "--multiple"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure Ethernet20 from routed to trunk mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from routed to trunk mode" in result.output - - # add Ethernet20 to vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001,1002,1003", "Ethernet20", "--multiple"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - 
print(result.output) - assert result.output == test_config_add_del_multiple_vlan_and_vlan_member_output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1001-1003", "Ethernet20", "--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add del 1001 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1003","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == show_vlan_brief_output - - def test_config_add_del_add_vlans_and_add_vlans_member_except_vlan(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - db = Db() - - # add vlan 1001,1002 - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1000,4000", "Ethernet20", "--multiple", "--except_flag"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure Ethernet20 from routed to trunk mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from routed to trunk mode" in result.output - - # add Ethernet20 to 
vlan1001, vlan1002, vlan1003 multiple flag - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1000,4000", "Ethernet20", "--multiple", "--except_flag"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.output) - assert result.output == test_config_add_del_add_vlans_and_add_vlans_member_except_vlan_output - - # remove vlan member except some - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1001,1002", "Ethernet20", "--multiple", "--except_flag"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == test_config_add_del_add_vlans_and_add_vlans_member_except_vlan__after_del_member_output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1001,1002", "Ethernet20", "--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # del 1001,1002 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1002","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == show_vlan_brief_output - - - def test_config_add_del_add_vlans_and_add_all_vlan_member(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - db = Db() - - # add vlan 1001 - result = 
runner.invoke(config.config.commands["vlan"].commands["add"], ["1001,1002,1003","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add Ethernet20 to vlan1001, vlan1002, vlan1003 multiple flag but Ethernet20 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["all", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure Ethernet20 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from routed to trunk mode" in result.output - - # add Ethernet20 to vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["all", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.output) - assert result.output == test_config_add_del_add_vlans_and_add_all_vlan_member_output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["all", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add del 1001 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001-1003","--multiple"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) 
- print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == show_vlan_brief_output - - def test_config_add_del_vlan_and_vlan_member_with_switchport_modes(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - db = Db() - - # add vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add Ethernet20 to vlan 1001 but Ethernet20 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "Ethernet20", "--untagged"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - - # configure Ethernet20 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from routed to access mode" in result.output - - # add Ethernet20 to vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "Ethernet20", "--untagged"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # add Ethernet20 to vlan 1001 as tagged member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1000", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet20 is in access mode! 
Tagged Members cannot be added" in result.output - - # configure Ethernet20 from access to trunk mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from access to trunk mode" in result.output - - # add Ethernet20 to vlan 1001 as tagged member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1000", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.output) - assert result.output == test_config_add_del_vlan_and_vlan_member_with_switchport_modes_output - - # configure Ethernet20 from trunk to routed mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "Ethernet20 has tagged member(s). \nRemove them to change mode to routed" in result.output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1000", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # configure Ethernet20 from trunk to routed mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "Ethernet20 has untagged member. 
\nRemove it to change mode to routed" in result.output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1001", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # configure Ethernet20 from trunk to routed mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["routed", "Ethernet20"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet20 switched from trunk to routed mode" in result.output - - # add del 1001 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1001"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == show_vlan_brief_output - - - def test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types(self, mock_restart_dhcp_relay_service): - runner = CliRunner() - db = Db() - - # add vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["add"], ["1001"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # add Ethernet64 to vlan 1001 but Ethernet64 is in routed mode will give error - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "Ethernet64"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Ethernet64 is in routed mode!\nUse switchport mode command to change port mode" in result.output - - # configure Ethernet64 from routed to trunk mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["trunk", "Ethernet64"], obj=db) - 
print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet64 switched from routed to trunk mode" in result.output - - # add Ethernet64 to vlan 1001 - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], - ["1001", "Ethernet64"], obj=db) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - - # configure Ethernet64 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet64"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code != 0 - assert "Ethernet64 is in trunk mode and have tagged member(s).\nRemove tagged member(s) from Ethernet64 to switch to access mode" in result.output - - # remove vlan member - result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["del"], - ["1001", "Ethernet64"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - - # configure Ethernet64 from routed to access mode - result = runner.invoke(config.config.commands["switchport"].commands["mode"],["access", "Ethernet64"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert "Ethernet64 switched from trunk to access mode" in result.output - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == test_config_add_del_vlan_and_vlan_member_with_switchport_modes_and_change_mode_types_output - - def test_config_vlan_proxy_arp_with_nonexist_vlan_intf_table(self): modes = ["enabled", "disabled"] runner = CliRunner() @@ -1244,8 +626,8 @@ def test_config_set_router_port_on_member_interface(self): result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Ethernet4", 
"10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) - assert result.exit_code != 0 - assert 'Interface Ethernet4 is not in routed mode!' in result.output + assert result.exit_code == 0 + assert 'Interface Ethernet4 is a member of vlan' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() diff --git a/utilities_common/cli.py b/utilities_common/cli.py index af3ca849037..9d3cdae7109 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -269,111 +269,6 @@ def is_port_vlan_member(config_db, port, vlan): return False - -def vlan_range_list(ctx, vid_range: str) -> list: - - vid1, vid2 = map(int, vid_range.split("-")) - - if vid1 == 1 or vid2 == 1: - ctx.fail("Vlan1 is default vlan") - - if vid1 >= vid2: - ctx.fail("{} is greater than {}. List cannot be generated".format(vid1,vid2)) - - if is_vlanid_in_range(vid1) and is_vlanid_in_range(vid2): - return list(range(vid1, vid2+1)) - else: - ctx.fail("Invalid VLAN ID must be in (2-4094)") - - -def multiple_vlan_parser(ctx, s_input: str) -> list: - - vlan_list = [] - - vlan_map = map(str, s_input.replace(" ", "").split(",")) - for vlan in vlan_map: - if "-" in vlan: - vlan_list += vlan_range_list(ctx, vlan) - elif vlan.isdigit() and int(vlan) not in vlan_list: - vlan_list.append(int(vlan)) - elif not vlan.isdigit(): - ctx.fail("{} is not integer".format(vlan)) - - vlan_list.sort() - return vlan_list - - -def get_existing_vlan_id(db) -> list: - existing_vlans = [] - vlan_data = db.cfgdb.get_table('VLAN') - - for i in vlan_data.keys(): - existing_vlans.append(int(i.strip("Vlan"))) - - return sorted(existing_vlans) - -def get_existing_vlan_id_on_interface(db,port) -> list: - intf_vlans = [] - vlan_member_data = db.cfgdb.get_table('VLAN_MEMBER') - - for (k,v) in vlan_member_data.keys(): - if v == port: - intf_vlans.append(int(k.strip("Vlan"))) - - return sorted(intf_vlans) - - -def vlan_member_input_parser(ctx, command_mode, db, except_flag, multiple, vid, port) 
-> list: - vid_list = [] - if vid == "all": - if command_mode == "add": - return get_existing_vlan_id(db) # config vlan member add - if command_mode == "del": - return get_existing_vlan_id_on_interface(db,port) # config vlan member del - - if multiple: - vid_list = multiple_vlan_parser(ctx, vid) - - if except_flag: - if command_mode == "add": - comp_list = get_existing_vlan_id(db) # config vlan member add - - elif command_mode == "del": - comp_list = get_existing_vlan_id_on_interface(db,port) # config vlan member del - - if multiple: - for i in vid_list: - if i in comp_list: - comp_list.remove(i) - - else: - if not vid.isdigit(): - ctx.fail("Vlan is not integer.") - vid = int(vid) - if vid in comp_list: - comp_list.remove(vid) - vid_list = comp_list - - elif not multiple: - # if entered vlan is not a integer - if not vid.isdigit(): - ctx.fail("Vlan is not integer.") - vid_list.append(int(vid)) - - # sorting the vid_list - vid_list.sort() - return vid_list - -def interface_is_tagged_member(db, interface_name): - """ Check if interface has tagged members i.e. 
is in trunk mode""" - vlan_member_table = db.get_table('VLAN_MEMBER') - - for key, val in vlan_member_table.items(): - if(key[1] == interface_name): - if (val['tagging_mode'] == 'tagged'): - return True - return False - def interface_is_in_vlan(vlan_member_table, interface_name): """ Check if an interface is in a vlan """ for _,intf in vlan_member_table: @@ -414,55 +309,6 @@ def is_pc_router_interface(config_db, pc): return False -def get_vlan_id(vlan): - vlan_prefix, vid = vlan.split('Vlan') - return vid - -def get_interface_name_for_display(db ,interface): - interface_naming_mode = get_interface_naming_mode() - iface_alias_converter = InterfaceAliasConverter(db) - if interface_naming_mode == "alias" and interface: - return iface_alias_converter.name_to_alias(interface) - return interface - -def get_interface_untagged_vlan_members(db,interface): - untagged_vlans = [] - vlan_member = db.cfgdb.get_table('VLAN_MEMBER') - - for member in natsorted(list(vlan_member.keys())): - interface_vlan, interface_name = member - - if interface == interface_name and vlan_member[member]['tagging_mode'] == 'untagged': - untagged_vlans.append(get_vlan_id(interface_vlan)) - - return "\n".join(untagged_vlans) - -def get_interface_tagged_vlan_members(db,interface): - tagged_vlans = [] - formatted_tagged_vlans = [] - vlan_member = db.cfgdb.get_table('VLAN_MEMBER') - - for member in natsorted(list(vlan_member.keys())): - interface_vlan, interface_name = member - - if interface == interface_name and vlan_member[member]['tagging_mode'] == 'tagged': - tagged_vlans.append(get_vlan_id(interface_vlan)) - - for i in range(len(tagged_vlans)//5+1): - formatted_tagged_vlans.append(" ,".join([str(x) for x in tagged_vlans[i*5:(i+1)*5]])) - - return "\n".join(formatted_tagged_vlans) - -def get_interface_switchport_mode(db, interface): - port = db.cfgdb.get_entry('PORT',interface) - portchannel = db.cfgdb.get_entry('PORTCHANNEL',interface) - switchport_mode = 'routed' - if "mode" in port: - 
switchport_mode = port['mode'] - elif "mode" in portchannel: - switchport_mode = portchannel['mode'] - return switchport_mode - def is_port_mirror_dst_port(config_db, port): """Check if port is already configured as mirror destination port """ mirror_table = config_db.get_table('MIRROR_SESSION') @@ -864,4 +710,4 @@ def remove(self): def remove_all(self): """ Remove the content of the cache for all users """ - shutil.rmtree(self.cache_directory_app) \ No newline at end of file + shutil.rmtree(self.cache_directory_app) From 52e9117c7c00748aa5836f6e8f92bb914da4e4bb Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Mon, 1 Apr 2024 11:20:32 +0800 Subject: [PATCH 25/45] [dualtor_neighbor_check] Fix the script not exists issue (#3244) What I did Fix sonic-net/sonic-buildimage#17200 Microsoft ADO (number only): 27317600 Upon advanced-reboot, the Lua script is flushed, so the EVALSHA command might suffer from the script not-existed issue. Let's check if the script exists before calling EVALSHA. Signed-off-by: Longxiang Lyu lolv@microsoft.com How I did it As the above. How to verify it UT and verify on dualtor testbed. 
Previous command output (if the output of a command-line utility has changed) New command output (if the output of a command-line utility has changed) --- scripts/dualtor_neighbor_check.py | 13 ++++- tests/dualtor_neighbor_check_test.py | 72 +++++++++++++++++++++++----- 2 files changed, 72 insertions(+), 13 deletions(-) diff --git a/scripts/dualtor_neighbor_check.py b/scripts/dualtor_neighbor_check.py index 39de3c676f9..5ceb327c4c2 100755 --- a/scripts/dualtor_neighbor_check.py +++ b/scripts/dualtor_neighbor_check.py @@ -304,12 +304,21 @@ def read_tables_from_db(appl_db): """Reads required tables from db.""" # NOTE: let's cache the db read script sha1 in APPL_DB under # key "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1" - db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY) - if not db_read_script_sha1: + def _load_script(): redis_load_cmd = "SCRIPT LOAD \"%s\"" % DB_READ_SCRIPT db_read_script_sha1 = redis_cli(redis_load_cmd).strip() WRITE_LOG_INFO("loaded script sha1: %s", db_read_script_sha1) appl_db.set(DB_READ_SCRIPT_CONFIG_DB_KEY, db_read_script_sha1) + return db_read_script_sha1 + + def _is_script_existed(script_sha1): + redis_script_exists_cmd = "SCRIPT EXISTS %s" % script_sha1 + cmd_output = redis_cli(redis_script_exists_cmd).strip() + return "1" in cmd_output + + db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY) + if ((not db_read_script_sha1) or (not _is_script_existed(db_read_script_sha1))): + db_read_script_sha1 = _load_script() redis_run_cmd = "EVALSHA %s 0" % db_read_script_sha1 result = redis_cli(redis_run_cmd).strip() diff --git a/tests/dualtor_neighbor_check_test.py b/tests/dualtor_neighbor_check_test.py index 5916a183a03..fb9475129cc 100644 --- a/tests/dualtor_neighbor_check_test.py +++ b/tests/dualtor_neighbor_check_test.py @@ -235,7 +235,7 @@ def test_read_from_db(self, mock_log_functions): assert asic_route_table == result[4] assert asic_neigh_table == result[5] - def test_read_from_db_with_lua_cache(self, mock_log_functions): + 
def test_read_from_db_script_not_existed(self, mock_log_functions): with patch("dualtor_neighbor_check.run_command") as mock_run_command: neighbors = {"192.168.0.2": "ee:86:d8:46:7d:01"} mux_states = {"Ethernet4": "active"} @@ -243,23 +243,73 @@ def test_read_from_db_with_lua_cache(self, mock_log_functions): asic_fdb = {"ee:86:d8:46:7d:01": "oid:0x3a00000000064b"} asic_route_table = [] asic_neigh_table = ["{\"ip\":\"192.168.0.23\",\"rif\":\"oid:0x6000000000671\",\"switch_id\":\"oid:0x21000000000000\"}"] - mock_run_command.return_value = json.dumps( - { - "neighbors": neighbors, - "mux_states": mux_states, - "hw_mux_states": hw_mux_states, - "asic_fdb": asic_fdb, - "asic_route_table": asic_route_table, - "asic_neigh_table": asic_neigh_table - } + mock_run_command.side_effect = [ + "(integer) 0", + "c53fd5eaad68be1e66a2fe80cd20a9cb18c91259", + json.dumps( + { + "neighbors": neighbors, + "mux_states": mux_states, + "hw_mux_states": hw_mux_states, + "asic_fdb": asic_fdb, + "asic_route_table": asic_route_table, + "asic_neigh_table": asic_neigh_table + } + ) + ] + mock_appl_db = MagicMock() + mock_appl_db.get = MagicMock(return_value="c53fd5eaad68be1e66a2fe80cd20a9cb18c91259") + + result = dualtor_neighbor_check.read_tables_from_db(mock_appl_db) + + mock_appl_db.get.assert_called_once_with("_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1") + mock_run_command.assert_has_calls( + [ + call("sudo redis-cli SCRIPT EXISTS c53fd5eaad68be1e66a2fe80cd20a9cb18c91259"), + call("sudo redis-cli SCRIPT LOAD \"%s\"" % dualtor_neighbor_check.DB_READ_SCRIPT), + call("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + ] ) + assert neighbors == result[0] + assert mux_states == result[1] + assert hw_mux_states == result[2] + assert {k: v.lstrip("oid:0x") for k, v in asic_fdb.items()} == result[3] + assert asic_route_table == result[4] + assert asic_neigh_table == result[5] + + def test_read_from_db_with_lua_cache(self, mock_log_functions): + with 
patch("dualtor_neighbor_check.run_command") as mock_run_command: + neighbors = {"192.168.0.2": "ee:86:d8:46:7d:01"} + mux_states = {"Ethernet4": "active"} + hw_mux_states = {"Ethernet4": "active"} + asic_fdb = {"ee:86:d8:46:7d:01": "oid:0x3a00000000064b"} + asic_route_table = [] + asic_neigh_table = ["{\"ip\":\"192.168.0.23\",\"rif\":\"oid:0x6000000000671\",\"switch_id\":\"oid:0x21000000000000\"}"] + mock_run_command.side_effect = [ + "(integer) 1", + json.dumps( + { + "neighbors": neighbors, + "mux_states": mux_states, + "hw_mux_states": hw_mux_states, + "asic_fdb": asic_fdb, + "asic_route_table": asic_route_table, + "asic_neigh_table": asic_neigh_table + } + ) + ] mock_appl_db = MagicMock() mock_appl_db.get = MagicMock(return_value="c53fd5eaad68be1e66a2fe80cd20a9cb18c91259") result = dualtor_neighbor_check.read_tables_from_db(mock_appl_db) mock_appl_db.get.assert_called_once_with("_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1") - mock_run_command.assert_called_once_with("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + mock_run_command.assert_has_calls( + [ + call("sudo redis-cli SCRIPT EXISTS c53fd5eaad68be1e66a2fe80cd20a9cb18c91259"), + call("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + ] + ) assert neighbors == result[0] assert mux_states == result[1] assert hw_mux_states == result[2] From bd86d33b29e1d868c975cf5d52fe95e7cb58f364 Mon Sep 17 00:00:00 2001 From: Geert Vlaemynck Date: Tue, 2 Apr 2024 03:30:15 +0200 Subject: [PATCH 26/45] [generate_dump] call hw-management-generate-dump.sh in collect_cisco_8000 (#2809) Signed-off-by: Geert Vlaemynck --- scripts/generate_dump | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index b08616444ea..ea3d3523331 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1564,6 +1564,27 @@ collect_cisco_8000() { echo "'/usr/share/sonic/device/${platform}' does not exist" > /tmp/error save_file /tmp/error sai 
false fi + + save_cmd "show platform versions" "platform.versions" + + # run 'hw-management-generate-dump.sh' script and save the result file + HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh + if [[ -x $HW_DUMP_FILE ]]; then + ${CMD_PREFIX} $HW_DUMP_FILE $ALLOW_PROCESS_STOP + ret=$? + if [[ $ret -ne 0 ]]; then + if [[ $ret -eq $TIMEOUT_EXIT_CODE ]]; then + echo "hw-management dump timedout after ${TIMEOUT_MIN} minutes." + else + echo "hw-management dump failed ..." + fi + else + save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false + rm -f /tmp/hw-mgmt-dump* + fi + else + echo "HW Mgmt dump script $HW_DUMP_FILE does not exist" + fi } ############################################################################## From c0b6632e7c39d6ab2ba9cb44711df7c4e6ad3c1c Mon Sep 17 00:00:00 2001 From: Deepak Singhal <115033986+deepak-singhal0408@users.noreply.github.com> Date: Tue, 2 Apr 2024 23:13:41 -0700 Subject: [PATCH 27/45] T2-VOQ-VS: Modified exception handling due to new sonic_platform package support for VS (#3250) ### What I did For T2-Chassis VS support, we are adding new sonic_platform package for vs platforms. Please refer https://github.com/sonic-net/sonic-buildimage/pull/18512 for more details. Due to this new platform package, need to modify excpetion handling as now the Module would be found, but the metadata file will not be found for pizzabox vs platforms. #### How I did it Modified the exception handling logic. MSFT ADO: 27414904 #### How to verify it Bring up vms-kvm-t0 topology. ran show interface status. The output is proper. PS: the Main PR(https://github.com/sonic-net/sonic-buildimage/pull/18512) is dependent on this PR to be merged in first. 
--- utilities_common/platform_sfputil_helper.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utilities_common/platform_sfputil_helper.py b/utilities_common/platform_sfputil_helper.py index 0de83f05b02..741b5334ecc 100644 --- a/utilities_common/platform_sfputil_helper.py +++ b/utilities_common/platform_sfputil_helper.py @@ -121,10 +121,10 @@ def is_rj45_port(port_name): if not platform_sfp_base: import sonic_platform_base platform_sfp_base = sonic_platform_base.sfp_base.SfpBase - except ModuleNotFoundError as e: + except (ModuleNotFoundError, FileNotFoundError) as e: # This method is referenced by intfutil which is called on vs image - # However, there is no platform API supported on vs image - # So False is returned in such case + # sonic_platform API support is added for vs image(required for chassis), it expects a metadata file, which + # wont be available on vs pizzabox duts, So False is returned(if either ModuleNotFound or FileNotFound) return False if platform_chassis and platform_sfp_base: From c57bf8169f6fbdab9fca6f0c7f2a067f71942cb6 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 8 Apr 2024 10:00:23 -0400 Subject: [PATCH 28/45] Improve Semgrep CI (#3259) What I did Semgrep's default ruleset (p/default) somehow lost some important rules How I did it Keep use p/default and add another rule How to verify it Added test code to this PR and Semgrep CI failed Failed result: https://github.com/sonic-net/sonic-utilities/actions/runs/8559846841/job/23457508614?pr=3259 Signed-off-by: Mai Bui --- .github/workflows/semgrep.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml index 8ebe082f50a..1686f20364e 100644 --- a/.github/workflows/semgrep.yml +++ b/.github/workflows/semgrep.yml @@ -18,4 +18,4 @@ jobs: - uses: actions/checkout@v3 - run: semgrep ci env: - SEMGREP_RULES: p/default + SEMGREP_RULES: "p/default 
r/python.lang.security.audit.dangerous-system-call-audit.dangerous-system-call-audit" From e914198db543aa5ca7f4801bae46dfaef870225d Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Tue, 9 Apr 2024 10:53:49 -0700 Subject: [PATCH 29/45] [Mellanox]Fix Syntax Warning in config mlnx command (#3254) ### What I did Fixing mellanox related warnings in https://github.com/sonic-net/sonic-buildimage/issues/18401 Fix syntax warnings when config platform mlnx command is executed ``` dmin@sonic:~$ sudo config /usr/local/lib/python3.11/dist-packages/config/aaa.py:120: SyntaxWarning: "is" with a literal. Did you mean "=="? if len(auth_protocol) is 0: /usr/local/lib/python3.11/dist-packages/config/plugins/mlnx.py:219: SyntaxWarning: "is not" with a literal. Did you mean "!="? if err is not 0: /usr/local/lib/python3.11/dist-packages/config/plugins/mlnx.py:232: SyntaxWarning: "is not" with a literal. Did you mean "!="? if err is not 0: /usr/local/lib/python3.11/dist-packages/config/aaa.py:120: SyntaxWarning: "is" with a literal. Did you mean "=="? if len(auth_protocol) is 0: /usr/local/lib/python3.11/dist-packages/config/plugins/mlnx.py:219: SyntaxWarning: "is not" with a literal. Did you mean "!="? if err is not 0: /usr/local/lib/python3.11/dist-packages/config/plugins/mlnx.py:232: SyntaxWarning: "is not" with a literal. Did you mean "!="? 
if err is not 0: ``` #### How I did it Replaced is not with != #### How to verify it Run the command as well as added UT --- config/plugins/mlnx.py | 4 ++-- tests/config_mlnx_test.py | 47 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 tests/config_mlnx_test.py diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index 75846d54e3e..19b39b4336d 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -216,7 +216,7 @@ def sdk_sniffer_enable(): env_variable_string=sdk_sniffer_env_variable_string) if not ignore: err = restart_swss() - if err is not 0: + if err != 0: return click.echo('SDK sniffer is Enabled, recording file is %s.' % sdk_sniffer_filename) else: @@ -229,7 +229,7 @@ def sdk_sniffer_disable(): ignore = sniffer_env_variable_set(enable=False, env_variable_name=ENV_VARIABLE_SX_SNIFFER) if not ignore: err = restart_swss() - if err is not 0: + if err != 0: return click.echo("SDK sniffer is Disabled.") else: diff --git a/tests/config_mlnx_test.py b/tests/config_mlnx_test.py new file mode 100644 index 00000000000..0cf2e117b40 --- /dev/null +++ b/tests/config_mlnx_test.py @@ -0,0 +1,47 @@ +import sys +import click +import pytest +import config.plugins.mlnx as config +from unittest.mock import patch, Mock +from click.testing import CliRunner +from utilities_common.db import Db + + +@patch('config.plugins.mlnx.sniffer_env_variable_set', Mock(return_value=False)) +@patch('config.plugins.mlnx.sniffer_filename_generate', Mock(return_value="sdk_file_name")) +class TestConfigMlnx(object): + def setup(self): + print('SETUP') + + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) + def test_config_sniffer_enable(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) + assert "SDK sniffer is Enabled, recording file is sdk_file_name." 
in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) + def test_config_sniffer_disble(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) + assert "SDK sniffer is Disabled." in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) + def test_config_sniffer_enable_fail(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) + assert "SDK sniffer is Enabled, recording file is sdk_file_name." not in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) + def test_config_sniffer_disble_fail(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) + assert "SDK sniffer is Disabled." not in result.output + + def teardown(self): + print('TEARDOWN') + From 9f962299ff9cc3319c77db24cae065e650c774e2 Mon Sep 17 00:00:00 2001 From: Vivek Date: Thu, 11 Apr 2024 15:10:58 -0700 Subject: [PATCH 30/45] Update intfutil and sfpshow to support DPC role (#3242) Signed-off-by: Vivek Reddy --- scripts/intfutil | 8 ++++++-- scripts/sfpshow | 13 +++++++----- tests/intfutil_test.py | 34 ++++++++++++++++---------------- tests/mock_tables/appl_db.json | 3 ++- tests/mock_tables/config_db.json | 3 ++- tests/mock_tables/state_db.json | 18 ----------------- tests/sfp_test.py | 22 +++++++++++++++++++++ 7 files changed, 57 insertions(+), 44 deletions(-) diff --git a/scripts/intfutil b/scripts/intfutil index eb40a491869..69472760d85 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -29,6 +29,7 @@ from utilities_common import multi_asic as multi_asic_util from utilities_common.intf_filter import parse_interface_in_filter from utilities_common.platform_sfputil_helper import is_rj45_port, RJ45_PORT_TYPE from sonic_py_common.interface import 
get_intf_longname +from sonic_py_common import multi_asic # ========================== Common interface-utils logic ========================== @@ -53,6 +54,7 @@ PORT_INTERFACE_TYPE = 'interface_type' PORT_ADV_INTERFACE_TYPES = 'adv_interface_types' PORT_TPID = "tpid" OPTICS_TYPE_RJ45 = RJ45_PORT_TYPE +TYPE_DPC = 'DPU-NPU Data Port' PORT_LINK_TRAINING = 'link_training' PORT_LINK_TRAINING_STATUS = 'link_training_status' @@ -214,15 +216,17 @@ def port_oper_speed_get_raw(db, intf_name): speed = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_SPEED) return speed -def port_optics_get(state_db, intf_name, type): +def port_optics_get(db, intf_name, type): """ Get optic type info for port """ full_table_id = PORT_TRANSCEIVER_TABLE_PREFIX + intf_name - optics_type = state_db.get(state_db.STATE_DB, full_table_id, type) + optics_type = db.get(db.STATE_DB, full_table_id, type) if optics_type is None: if is_rj45_port(intf_name): return OPTICS_TYPE_RJ45 + elif db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, multi_asic.PORT_ROLE) == multi_asic.DPU_CONNECT_PORT: + return TYPE_DPC else: return "N/A" return optics_type diff --git a/scripts/sfpshow b/scripts/sfpshow index 85e8d8a1f10..2d647176dad 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -14,7 +14,6 @@ from typing import Dict import click from natsort import natsorted -from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix, recirc_prefix from sonic_py_common import multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string from utilities_common.sfp_helper import ( @@ -544,6 +543,10 @@ class SFPShow(object): output = ZR_PM_NOT_APPLICABLE_STR + '\n' return output + def is_valid_physical_port(self, port_name): + role = self.db.get(self.db.APPL_DB, 'PORT_TABLE:{}'.format(port_name), multi_asic.PORT_ROLE) + return multi_asic.is_front_panel_port(port_name, role) + @multi_asic_util.run_on_multi_asic def get_eeprom(self): if 
self.intf_name is not None: @@ -553,7 +556,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_eeprom[interface] = self.convert_interface_sfp_info_to_cli_output_string( self.db, interface, self.dump_dom) @@ -577,7 +580,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: key = re.split(':', i, maxsplit=1)[-1].strip() - if key and key.startswith(front_panel_prefix()) and not key.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if key and self.is_valid_physical_port(key): presence_string = self.convert_interface_sfp_presence_state_to_cli_output_string(self.db, key) port_table.append((key, presence_string)) @@ -592,7 +595,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_pm[interface] = self.convert_interface_sfp_pm_to_cli_output_string( self.db, interface) @@ -605,7 +608,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_status[interface] = 
self.convert_interface_sfp_status_to_cli_output_string( self.db, interface) diff --git a/tests/intfutil_test.py b/tests/intfutil_test.py index 469c73e0718..f0c75a4c0f5 100644 --- a/tests/intfutil_test.py +++ b/tests/intfutil_test.py @@ -11,23 +11,23 @@ scripts_path = os.path.join(modules_path, "scripts") show_interface_status_output="""\ - Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC ---------------- --------------- ------- ----- ----- --------- --------------- ------ ------- --------------- ---------- - Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off - Ethernet16 16 100M 9100 N/A etp5 trunk up up RJ45 off - Ethernet24 24 1G 9100 N/A etp6 trunk up up QSFP28 or later off - Ethernet28 28 1000M 9100 N/A etp8 trunk up up RJ45 off - Ethernet32 13,14,15,16 40G 9100 rs etp9 PortChannel1001 up up N/A off - Ethernet36 9,10,11,12 10M 9100 N/A etp10 routed up up RJ45 off - Ethernet112 93,94,95,96 40G 9100 rs etp29 PortChannel0001 up up N/A off - Ethernet116 89,90,91,92 40G 9100 rs etp30 PortChannel0002 up up N/A off - Ethernet120 101,102,103,104 40G 9100 rs etp31 PortChannel0003 up up N/A off - Ethernet124 97,98,99,100 40G 9100 auto etp32 PortChannel0004 up up N/A off -PortChannel0001 N/A 40G 9100 N/A N/A routed down up N/A N/A -PortChannel0002 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel0003 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel0004 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel1001 N/A 40G 9100 N/A N/A trunk N/A N/A N/A N/A + Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC +--------------- --------------- ------- ----- ----- --------- --------------- ------ ------- ----------------- ---------- + Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off + Ethernet16 16 100M 9100 N/A etp5 trunk up up RJ45 off + Ethernet24 24 1G 9100 N/A etp6 trunk up up DPU-NPU Data Port off + Ethernet28 28 1000M 9100 N/A etp8 trunk up up RJ45 off + Ethernet32 13,14,15,16 40G 9100 rs etp9 
PortChannel1001 up up N/A off + Ethernet36 9,10,11,12 10M 9100 N/A etp10 routed up up RJ45 off + Ethernet112 93,94,95,96 40G 9100 rs etp29 PortChannel0001 up up N/A off + Ethernet116 89,90,91,92 40G 9100 rs etp30 PortChannel0002 up up N/A off + Ethernet120 101,102,103,104 40G 9100 rs etp31 PortChannel0003 up up N/A off + Ethernet124 97,98,99,100 40G 9100 auto etp32 PortChannel0004 up up N/A off +PortChannel0001 N/A 40G 9100 N/A N/A routed down up N/A N/A +PortChannel0002 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel0003 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel0004 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel1001 N/A 40G 9100 N/A N/A trunk N/A N/A N/A N/A """ show_interface_status_Ethernet32_output="""\ diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 2889e6b202e..e967caa7585 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -79,7 +79,8 @@ "pfc_asym": "off", "mtu": "9100", "tpid": "0x8100", - "admin_status": "up" + "admin_status": "up", + "role": "Dpc" }, "PORT_TABLE:Ethernet28": { "index": "7", diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 2a81f96bfac..325d3eabe3c 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -97,7 +97,8 @@ "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "1000" + "speed": "1000", + "role": "Dpc" }, "PORT|Ethernet28": { "admin_status": "up", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index b622705be1e..d1da74ae4bc 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -173,24 +173,6 @@ "nominal_bit_rate": "N/A", "application_advertisement": "N/A" }, - "TRANSCEIVER_INFO|Ethernet24": { - "type": "QSFP28 or later", - "hardware_rev": "AC", - "serial": "MT1706FT02066", - "manufacturer": "Mellanox", - "model": "MFA1A00-C003", - "vendor_oui": "00-02-c9", - "vendor_date": "2017-01-13 ", - 
"connector": "No separable connector", - "encoding": "64B66B", - "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", - "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", - "cable_type": "Length Cable Assembly(m)", - "cable_length": "3", - "specification_compliance": "{'10/40G Ethernet Compliance Code': '40G Active Cable (XLPPI)'}", - "nominal_bit_rate": "255", - "application_advertisement": "N/A" - }, "TRANSCEIVER_INFO|Ethernet28": { "type": "RJ45", "hardware_rev": "N/A", diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 22eda401e57..d1f03280fe3 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -885,6 +885,28 @@ def test_sfp_presence(self): assert result.exit_code == 0 assert result.output == expected + def test_sfp_dpc_ports(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + def test_sfp_eeprom_with_dom(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0", "-d"]) From f81317c358e3340775eb1c494f7e5d2b8f8085e7 Mon Sep 17 00:00:00 2001 From: sophiek Date: Wed, 17 Apr 2024 09:44:56 +0300 Subject: 
[PATCH 31/45] [Mellanox] added component versions to techsupport (#3264) Added get component versions to techsupport get_component_versions.py is a script that output a table that gathers the versions of all the Nvidia-related components in SONiC. --- scripts/generate_dump | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index ea3d3523331..9dcb62afe67 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1251,6 +1251,7 @@ collect_mellanox() { echo "HW Mgmt dump script $HW_DUMP_FILE does not exist" fi + save_cmd "get_component_versions.py" "component_versions" } ############################################################################### From 29d4e88f4893948cf381d181b40c58395acf23b4 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Thu, 18 Apr 2024 14:07:33 +0800 Subject: [PATCH 32/45] Add multi ASIC support for syslog rate limit feature (#3235) * Add multi ASIC support for syslog rate limit feature * Update command ref --- config/syslog.py | 118 +++++++++-- doc/Command-Reference.md | 78 ++++++- show/syslog.py | 69 +++++- syslog_util/common.py | 31 ++- tests/mock_tables/asic0/config_db.json | 8 + tests/mock_tables/asic1/config_db.json | 8 + tests/mock_tables/config_db.json | 23 +- tests/syslog_multi_asic_test.py | 281 +++++++++++++++++++++++++ tests/syslog_test.py | 1 - 9 files changed, 586 insertions(+), 31 deletions(-) create mode 100644 tests/syslog_multi_asic_test.py diff --git a/config/syslog.py b/config/syslog.py index 7533a7f71f2..a5d520d9cf7 100644 --- a/config/syslog.py +++ b/config/syslog.py @@ -5,7 +5,9 @@ import subprocess import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util from sonic_py_common import logger +from sonic_py_common import multi_asic from syslog_util import common as syslog_common @@ -457,20 +459,46 @@ def delete(db, server_ip_address): def rate_limit_host(db, interval, burst): 
""" Configure syslog rate limit for host """ syslog_common.rate_limit_validator(interval, burst) - syslog_common.save_rate_limit_to_db(db, None, interval, burst, log) + syslog_common.save_rate_limit_to_db(db.cfgdb, None, interval, burst, log) @syslog.command("rate-limit-container") @click.argument("service_name", required=True) @click.option("-i", "--interval", help="Configures syslog rate limit interval in seconds for specified containers", type=click.IntRange(0, 2147483647)) @click.option("-b", "--burst", help="Configures syslog rate limit burst in number of messages for specified containers", type=click.IntRange(0, 2147483647)) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def rate_limit_container(db, service_name, interval, burst): +def rate_limit_container(db, service_name, interval, burst, namespace): """ Configure syslog rate limit for containers """ syslog_common.rate_limit_validator(interval, burst) - feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) + features = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) + syslog_common.service_validator(features, service_name) + + global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features) + if not namespace: + # for all namespaces + for namespace, cfg_db in db.cfgdb_clients.items(): + if namespace == multi_asic.DEFAULT_NAMESPACE: + feature_data = global_feature_data + else: + feature_data = per_ns_feature_data + if service_name and service_name not in feature_data: + continue + syslog_common.service_validator(feature_data, service_name) + syslog_common.save_rate_limit_to_db(cfg_db, service_name, interval, burst, log) + return + elif namespace == 'default': + # for default/global namespace only + namespace = multi_asic.DEFAULT_NAMESPACE + feature_data = global_feature_data + else: + # for a specific namespace + 
feature_data = per_ns_feature_data + syslog_common.service_validator(feature_data, service_name) - syslog_common.save_rate_limit_to_db(db, service_name, interval, burst, log) + syslog_common.save_rate_limit_to_db(db.cfgdb_clients[namespace], service_name, interval, burst, log) @syslog.group( @@ -482,14 +510,70 @@ def rate_limit_feature(): pass +def get_feature_names_to_proceed(db, service_name, namespace): + """Get feature name list to be proceed by "config syslog rate-limit-feature enable" and + "config syslog rate-limit-feature disable" CLIs + + Args: + db (object): Db object + service_name (str): Nullable service name to be enable/disable + namespace (str): Namespace provided by user + + Returns: + list: A list of feature name + """ + features = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) + if service_name: + syslog_common.service_validator(features, service_name) + + global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features) + if not namespace: + if not service_name: + feature_list = [feature_name for feature_name in global_feature_data.keys()] + if multi_asic.is_multi_asic(): + asic_count = multi_asic.get_num_asics() + for i in range(asic_count): + feature_list.extend([f'{feature_name}{i}' for feature_name in per_ns_feature_data.keys()]) + else: + feature_config = features[service_name] + feature_list = [] + if feature_config[syslog_common.FEATURE_HAS_GLOBAL_SCOPE].lower() == 'true': + feature_list.append(service_name) + + if multi_asic.is_multi_asic(): + if feature_config[syslog_common.FEATURE_HAS_PER_ASIC_SCOPE].lower() == 'true': + asic_count = multi_asic.get_num_asics() + for i in range(asic_count): + feature_list.append(multi_asic.get_container_name_from_asic_id(service_name, i)) + elif namespace == 'default': + if not service_name: + feature_list = [feature_name for feature_name in global_feature_data.keys()] + else: + syslog_common.service_validator(global_feature_data, service_name) + feature_list = [service_name] + 
else: + asic_num = multi_asic.get_asic_id_from_name(namespace) + if not service_name: + feature_list = [multi_asic.get_container_name_from_asic_id(feature_name, asic_num) for feature_name in per_ns_feature_data.keys()] + else: + syslog_common.service_validator(per_ns_feature_data, service_name) + feature_list = [multi_asic.get_container_name_from_asic_id(service_name, asic_num)] + return feature_list + + @rate_limit_feature.command("enable") +@click.argument("service_name", required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def enable_rate_limit_feature(db): +def enable_rate_limit_feature(db, service_name, namespace): """ Enable syslog rate limit feature """ - feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - for feature_name in feature_data.keys(): + feature_list = get_feature_names_to_proceed(db, service_name, namespace) + for feature_name in feature_list: click.echo(f'Enabling syslog rate limit feature for {feature_name}') - output, _ = clicommon.run_command(['docker', 'ps', '-q', '-f', 'status=running', '-f', f'name={feature_name}'], return_cmd=True) + shell_cmd = f'docker ps -f status=running --format "{{{{.Names}}}}" | grep -E "^{feature_name}$"' + output, _ = clicommon.run_command(shell_cmd, return_cmd=True, shell=True) if not output: click.echo(f'{feature_name} is not running, ignoring...') continue @@ -517,16 +601,21 @@ def enable_rate_limit_feature(db): if not failed: click.echo(f'Enabled syslog rate limit feature for {feature_name}') - - + + @rate_limit_feature.command("disable") +@click.argument("service_name", required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def 
disable_rate_limit_feature(db): +def disable_rate_limit_feature(db, service_name, namespace): """ Disable syslog rate limit feature """ - feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - for feature_name in feature_data.keys(): + feature_list = get_feature_names_to_proceed(db, service_name, namespace) + for feature_name in feature_list: click.echo(f'Disabling syslog rate limit feature for {feature_name}') - output, _ = clicommon.run_command(['docker', 'ps', '-q', '-f', 'status=running', '-f', f'name={feature_name}'], return_cmd=True) + shell_cmd = f'docker ps -f status=running --format "{{{{.Names}}}}" | grep -E "^{feature_name}$"' + output, _ = clicommon.run_command(shell_cmd, return_cmd=True, shell=True) if not output: click.echo(f'{feature_name} is not running, ignoring...') continue @@ -553,4 +642,3 @@ def disable_rate_limit_feature(db): if not failed: click.echo(f'Disabled syslog rate limit feature for {feature_name}') - diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 0709b6d4f1d..e97922af656 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -10149,7 +10149,7 @@ This command displays rate limit configuration for containers. - Usage ``` - show syslog rate-limit-container [] + show syslog rate-limit-container [] -n [] ``` - Example: @@ -10173,6 +10173,37 @@ This command displays rate limit configuration for containers. 
SERVICE INTERVAL BURST -------------- ---------- ------- bgp 0 0 + + # Multi ASIC + show syslog rate-limit-container + SERVICE INTERVAL BURST + -------- ---------- -------- + bgp 500 N/A + snmp 300 20000 + swss 2000 12000 + Namespace asic0: + SERVICE INTERVAL BURST + -------- ---------- -------- + bgp 500 N/A + snmp 300 20000 + swss 2000 12000 + + # Multi ASIC + show syslog rate-limit-container bgp + SERVICE INTERVAL BURST + -------- ---------- -------- + bgp 500 5000 + Namespace asic0: + SERVICE INTERVAL BURST + -------- ---------- -------- + bgp 500 5000 + + # Multi ASIC + show syslog rate-limit-container bgp -n asic1 + Namespace asic1: + SERVICE INTERVAL BURST + -------- ---------- -------- + bgp 500 5000 ``` ### Syslog Config Commands @@ -10251,10 +10282,19 @@ This command is used to configure syslog rate limit for containers. - Parameters: - _interval_: determines the amount of time that is being measured for rate limiting. - _burst_: defines the amount of messages, that have to occur in the time limit of interval, to trigger rate limiting + - _namespace_: namespace name or all. Value "default" indicates global namespace. - Example: ``` + # Config bgp for all namespaces. For multi ASIC platforms, bgp service in all namespaces will be affected. + # For single ASIC platforms, bgp service in global namespace will be affected. admin@sonic:~$ sudo config syslog rate-limit-container bgp --interval 300 --burst 20000 + + # Config bgp for global namespace only. + config syslog rate-limit-container bgp --interval 300 --burst 20000 -n default + + # Config bgp for asic0 namespace only. + config syslog rate-limit-container bgp --interval 300 --burst 20000 -n asic0 ``` **config syslog rate-limit-feature enable** @@ -10263,12 +10303,28 @@ This command is used to enable syslog rate limit feature. 
- Usage: ``` - config syslog rate-limit-feature enable + config syslog rate-limit-feature enable [] -n [] ``` - Example: ``` + # Enable syslog rate limit for all services in all namespaces admin@sonic:~$ sudo config syslog rate-limit-feature enable + + # Enable syslog rate limit for all services in global namespace + config syslog rate-limit-feature enable -n default + + # Enable syslog rate limit for all services in asic0 namespace + config syslog rate-limit-feature enable -n asic0 + + # Enable syslog rate limit for database in all namespaces + config syslog rate-limit-feature enable database + + # Enable syslog rate limit for database in default namespace + config syslog rate-limit-feature enable database -n default + + # Enable syslog rate limit for database in asic0 namespace + config syslog rate-limit-feature enable database -n asic0 ``` **config syslog rate-limit-feature disable** @@ -10277,12 +10333,28 @@ This command is used to disable syslog rate limit feature. - Usage: ``` - config syslog rate-limit-feature disable + config syslog rate-limit-feature disable [] -n [] ``` - Example: ``` + # Disable syslog rate limit for all services in all namespaces admin@sonic:~$ sudo config syslog rate-limit-feature disable + + # Disable syslog rate limit for all services in global namespace + config syslog rate-limit-feature disable -n default + + # Disable syslog rate limit for all services in asic0 namespace + config syslog rate-limit-feature disable -n asic0 + + # Disable syslog rate limit for database in all namespaces + config syslog rate-limit-feature disable database + + # Disable syslog rate limit for database in default namespace + config syslog rate-limit-feature disable database -n default + + # Disable syslog rate limit for database in asic0 namespace + config syslog rate-limit-feature disable database -n asic0 ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog) diff --git a/show/syslog.py b/show/syslog.py index 
d258be3351d..ad4e7b5b854 100644 --- a/show/syslog.py +++ b/show/syslog.py @@ -1,9 +1,12 @@ +from unicodedata import name import click import tabulate from natsort import natsorted import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util +from sonic_py_common import multi_asic from syslog_util import common as syslog_common @@ -83,8 +86,11 @@ def rate_limit_host(db): name='rate-limit-container' ) @click.argument('service_name', metavar='', required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def rate_limit_container(db, service_name): +def rate_limit_container(db, service_name, namespace): """ Show syslog rate limit configuration for containers """ header = [ @@ -92,16 +98,57 @@ def rate_limit_container(db, service_name): "INTERVAL", "BURST", ] - body = [] + + # Feature configuration in global DB features = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - + if service_name: + syslog_common.service_validator(features, service_name) + + global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features) + if not namespace: + # for all namespaces + is_first = True + for namespace, cfg_db in natsorted(db.cfgdb_clients.items()): + if is_first: + is_first = False + else: + # add a new blank line between each namespace + click.echo('\n') + + if namespace == multi_asic.DEFAULT_NAMESPACE: + if service_name and service_name not in global_feature_data: + continue + echo_rate_limit_config(header, cfg_db, service_name, global_feature_data) + else: + if service_name and service_name not in per_ns_feature_data: + continue + echo_rate_limit_config(header, cfg_db, service_name, per_ns_feature_data, namespace) + elif namespace == 'default': + # for default/global namespace only + echo_rate_limit_config(header, db.cfgdb, service_name, global_feature_data) 
+ else: + # for a specific namespace + echo_rate_limit_config(header, db.cfgdb_clients[namespace], service_name, per_ns_feature_data, namespace) + + +def echo_rate_limit_config(header, db, service_name, features, namespace=None): + """Echo rate limit configuration + + Args: + header (list): CLI headers + db (object): Db object + service_name (str): Nullable service name to be printed. + features (dict): Feature data got from CONFIG DB + namespace (str, optional): Namespace provided by user. Defaults to None. + """ + body = [] if service_name: syslog_common.service_validator(features, service_name) service_list = [service_name] else: - service_list = [name for name, service_config in features.items() if service_config.get(syslog_common.SUPPORT_RATE_LIMIT, '').lower() == 'true'] - - syslog_configs = db.cfgdb.get_table(syslog_common.SYSLOG_CONFIG_FEATURE_TABLE) + service_list = features.keys() + + syslog_configs = db.get_table(syslog_common.SYSLOG_CONFIG_FEATURE_TABLE) for service in natsorted(service_list): if service in syslog_configs: entry = syslog_configs[service] @@ -110,5 +157,11 @@ def rate_limit_container(db, service_name): entry.get(syslog_common.SYSLOG_RATE_LIMIT_BURST, 'N/A')]) else: body.append([service, 'N/A', 'N/A']) - - click.echo(format(header, body)) + + if namespace: + click.echo(f'Namespace {namespace}:') + + if body: + click.echo(format(header, body)) + else: + click.echo('N/A') diff --git a/syslog_util/common.py b/syslog_util/common.py index 5282c088e8f..742e6ae059d 100644 --- a/syslog_util/common.py +++ b/syslog_util/common.py @@ -1,4 +1,5 @@ import click +from sonic_py_common import multi_asic FEATURE_TABLE = "FEATURE" @@ -9,6 +10,8 @@ SYSLOG_RATE_LIMIT_INTERVAL = 'rate_limit_interval' SYSLOG_RATE_LIMIT_BURST = 'rate_limit_burst' SUPPORT_RATE_LIMIT = 'support_syslog_rate_limit' +FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope' +FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope' def rate_limit_validator(interval, burst): @@ -70,7 +73,33 @@ def 
save_rate_limit_to_db(db, service_name, interval, burst, log): data[SYSLOG_RATE_LIMIT_INTERVAL] = interval if burst is not None: data[SYSLOG_RATE_LIMIT_BURST] = burst - db.cfgdb.mod_entry(table, key, data) + db.mod_entry(table, key, data) log.log_notice(f"Configured syslog {service_name} rate-limits: interval={data.get(SYSLOG_RATE_LIMIT_INTERVAL, 'N/A')},\ burst={data.get(SYSLOG_RATE_LIMIT_BURST, 'N/A')}") + +def extract_feature_data(features): + """Extract feature data in global scope and feature data in per ASIC namespace scope + + Args: + features (dict): Feature data got from CONFIG DB + + Returns: + tuple: + """ + global_feature_data = {} + per_ns_feature_data = {} + is_multi_asic = multi_asic.is_multi_asic() + for feature_name, feature_config in features.items(): + if not feature_config.get(SUPPORT_RATE_LIMIT, '').lower() == 'true': + continue + + if is_multi_asic: + if feature_config.get(FEATURE_HAS_GLOBAL_SCOPE, '').lower() == 'true': + global_feature_data[feature_name] = feature_config + + if feature_config.get(FEATURE_HAS_PER_ASIC_SCOPE, '').lower() == 'true': + per_ns_feature_data[feature_name] = feature_config + else: + global_feature_data[feature_name] = feature_config + return global_feature_data, per_ns_feature_data diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 3117115489b..7b4cac3430e 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -294,5 +294,13 @@ "monErrThreshRxCells": "61035156", "monPollThreshIsolation": "1", "monPollThreshRecovery": "8" + }, + "SYSLOG_CONFIG_FEATURE|bgp": { + "rate_limit_interval": "111", + "rate_limit_burst": "33333" + }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "222", + "rate_limit_burst": "22222" } } diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 95cf040544d..56823ae113b 100644 --- a/tests/mock_tables/asic1/config_db.json +++ 
b/tests/mock_tables/asic1/config_db.json @@ -234,5 +234,13 @@ "monErrThreshRxCells": "61035156", "monPollThreshIsolation": "1", "monPollThreshRecovery": "8" + }, + "SYSLOG_CONFIG_FEATURE|bgp": { + "rate_limit_interval": "444", + "rate_limit_burst": "44444" + }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "555", + "rate_limit_burst": "55555" } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 325d3eabe3c..5cd30c986cc 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -794,13 +794,19 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", - "set_owner": "local" + "set_owner": "local", + "support_syslog_rate_limit": "true", + "has_global_scope": "false", + "has_per_asic_scope": "true" }, "FEATURE|database": { "state": "always_enabled", "auto_restart": "always_enabled", "high_mem_alert": "disabled", - "set_owner": "local" + "set_owner": "local", + "support_syslog_rate_limit": "true", + "has_global_scope": "true", + "has_per_asic_scope": "true" }, "FEATURE|dhcp_relay": { "state": "enabled", @@ -824,7 +830,10 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", - "set_owner": "kube" + "set_owner": "kube", + "support_syslog_rate_limit": "true", + "has_global_scope": "true", + "has_per_asic_scope": "false" }, "FEATURE|radv": { "state": "enabled", @@ -874,6 +883,14 @@ "high_mem_alert": "disabled", "set_owner": "kube" }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "200", + "rate_limit_burst": "20000" + }, + "SYSLOG_CONFIG_FEATURE|pmon": { + "rate_limit_interval": "100", + "rate_limit_burst": "10000" + }, "DEVICE_METADATA|localhost": { "default_bgp_status": "down", "default_pfcwd_status": "enable", diff --git a/tests/syslog_multi_asic_test.py b/tests/syslog_multi_asic_test.py new file mode 100644 index 00000000000..7933edcd669 --- /dev/null +++ b/tests/syslog_multi_asic_test.py @@ -0,0 +1,281 @@ +import mock 
+import pytest +from click.testing import CliRunner +from importlib import reload +from utilities_common.db import Db + +show_all_config = """SERVICE INTERVAL BURST +--------- ---------- ------- +database 200 20000 +pmon 100 10000 + + +Namespace asic0: +SERVICE INTERVAL BURST +--------- ---------- ------- +bgp 111 33333 +database 222 22222 + + +Namespace asic1: +SERVICE INTERVAL BURST +--------- ---------- ------- +bgp 444 44444 +database 555 55555 +""" + +show_global_ns_config = """SERVICE INTERVAL BURST +--------- ---------- ------- +database 200 20000 +pmon 100 10000 +""" + +show_asic0_ns_config = """Namespace asic0: +SERVICE INTERVAL BURST +--------- ---------- ------- +bgp 111 33333 +database 222 22222 +""" + +show_all_ns_database_config = """SERVICE INTERVAL BURST +--------- ---------- ------- +database 200 20000 + + +Namespace asic0: +SERVICE INTERVAL BURST +--------- ---------- ------- +database 222 22222 + + +Namespace asic1: +SERVICE INTERVAL BURST +--------- ---------- ------- +database 555 55555 +""" + +show_global_ns_database_config = """SERVICE INTERVAL BURST +--------- ---------- ------- +database 200 20000 +""" + +show_asic0_ns_database_config = """Namespace asic0: +SERVICE INTERVAL BURST +--------- ---------- ------- +database 222 22222 +""" + + +@pytest.fixture(scope='module') +def setup_cmd_module(): + # Mock to multi ASIC + from .mock_tables import mock_multi_asic + from .mock_tables import dbconnector + reload(mock_multi_asic) + dbconnector.load_namespace_config() + + import show.main as show + import config.main as config + + # Refresh syslog module for show and config + import show.syslog as show_syslog + reload(show_syslog) + show.cli.add_command(show_syslog.syslog) + + import config.syslog as config_syslog + reload(config_syslog) + config.config.add_command(config_syslog.syslog) + + yield show, config + + # Mock back to single ASIC + from .mock_tables import mock_single_asic + reload(mock_single_asic) + + # Refresh syslog module for show 
and config + reload(show_syslog) + show.cli.add_command(show_syslog.syslog) + + reload(config_syslog) + config.config.add_command(config_syslog.syslog) + + +class TestSyslogRateLimitMultiAsic: + def test_show_rate_limit_container(self, setup_cmd_module): + show, _ = setup_cmd_module + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], + [] + ) + + assert result.output == show_all_config + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], ["-n", "default"] + ) + + assert result.output == show_global_ns_config + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], ["-n", "asic0"] + ) + + assert result.output == show_asic0_ns_config + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], ["database"] + ) + + assert result.output == show_all_ns_database_config + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], ["database", "-n", "default"] + ) + + assert result.output == show_global_ns_database_config + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["syslog"].commands["rate-limit-container"], ["database", "-n", "asic0"] + ) + + assert result.output == show_asic0_ns_database_config + assert result.exit_code == 0 + + def test_config_rate_limit_container(self, setup_cmd_module): + _, config = setup_cmd_module + + runner = CliRunner() + db = Db() + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-container"], + ["database", "--interval", 1, "--burst", 100], obj=db + ) + assert result.exit_code == 0 + for cfg_db in db.cfgdb_clients.values(): + data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'database') + assert data['rate_limit_burst'] == '100' + assert 
data['rate_limit_interval'] == '1' + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-container"], + ["bgp", "--interval", 1, "--burst", 100], obj=db + ) + assert result.exit_code == 0 + for namespace, cfg_db in db.cfgdb_clients.items(): + if namespace != '': + data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'bgp') + assert data['rate_limit_burst'] == '100' + assert data['rate_limit_interval'] == '1' + else: + table = cfg_db.get_table('SYSLOG_CONFIG_FEATURE') + assert 'bgp' not in table + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-container"], + ["pmon", "--interval", 1, "--burst", 100], obj=db + ) + assert result.exit_code == 0 + for namespace, cfg_db in db.cfgdb_clients.items(): + if namespace == '': + data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'pmon') + assert data['rate_limit_burst'] == '100' + assert data['rate_limit_interval'] == '1' + else: + table = cfg_db.get_table('SYSLOG_CONFIG_FEATURE') + assert 'pmon' not in table + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-container"], + ["pmon", "--interval", 2, "--burst", 200, "-n", "default"], obj=db + ) + assert result.exit_code == 0 + cfg_db = db.cfgdb_clients[''] + data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'pmon') + assert data['rate_limit_burst'] == '200' + assert data['rate_limit_interval'] == '2' + + @mock.patch('config.syslog.clicommon.run_command', mock.MagicMock(return_value=('', 0))) + def test_enable_syslog_rate_limit_feature(self, setup_cmd_module): + _, config = setup_cmd_module + + runner = CliRunner() + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], [] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], + ['-n', 'default'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + 
config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], + ['-n', 'asic0'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], ['database'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], + ['database', '-n', 'default'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], + ['database', '-n', 'asic0'] + ) + assert result.exit_code == 0 + + @mock.patch('config.syslog.clicommon.run_command', mock.MagicMock(return_value=('', 0))) + def test_disable_syslog_rate_limit_feature(self, setup_cmd_module): + _, config = setup_cmd_module + + runner = CliRunner() + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], [] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], + ['-n', 'default'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], + ['-n', 'asic0'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], ['database'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], + ['database', '-n', 'default'] + ) + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], + ['database', '-n', 'asic0'] + ) + assert result.exit_code == 0 diff --git a/tests/syslog_test.py b/tests/syslog_test.py index 44915b6d369..c1cbee11273 100644 --- 
a/tests/syslog_test.py +++ b/tests/syslog_test.py @@ -484,4 +484,3 @@ def side_effect(*args, **kwargs): config.config.commands["syslog"].commands["rate-limit-feature"].commands["disable"], obj=db ) assert result.exit_code == SUCCESS - From ad464a9f0e7003fe12adafad28b61c0df0bf29fc Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Thu, 18 Apr 2024 10:51:18 +0300 Subject: [PATCH 33/45] [Mellanox] Support new platform SN5400 in generic configuration update (#3272) - What I did Add support for a new platform x86_64-nvidia_sn5400-r0 - How to verify it Manual and unit test --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 6fa65be21fb..76020af984e 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], - "spc4": [ "ACS-SN5600"] + "spc4": [ "ACS-SN5400", "ACS-SN5600" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From eb165f361b88f3c374ab84c7b6fb676226aa6837 Mon Sep 17 00:00:00 2001 From: Yuanzhe 
<150663541+yuazhe@users.noreply.github.com> Date: Sat, 20 Apr 2024 01:09:31 +0800 Subject: [PATCH 34/45] Fix double hex to decimal conversion (#3267) In the previous commit with hash a3cf5c that aimed to address the issue where sfputil incorrectly interpreted page numbers as decimal instead of hexadecimal, there was an inadvertent double conversion from hexadecimal to decimal. For instance, inputting 11 resulted in conversion to 17 and then further to 23. To rectify this, the second conversion would be removed. A related ut has also been added. Signed-off-by: Yuanzhe, Liu --- sfputil/main.py | 2 +- tests/sfputil_test.py | 90 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) diff --git a/sfputil/main.py b/sfputil/main.py index dccbae60bbf..ad0b1b3775e 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -705,7 +705,7 @@ def eeprom_hexdump(port, page): page = 0 else: page = validate_eeprom_page(page) - return_code, output = eeprom_hexdump_single_port(port, int(str(page), base=16)) + return_code, output = eeprom_hexdump_single_port(port, page) click.echo(output) sys.exit(return_code) else: diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 9eda8ca249f..523848ec453 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -669,6 +669,96 @@ def test_show_eeprom_hexdump_read_eeprom_not_implemented(self, mock_chassis): expected_output = "Sfp.read_eeprom() is currently not implemented for this platform\n" assert result.output == expected_output + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.isinstance', MagicMock(return_value=True)) + def test_show_eeprom_hexdump_processing_hex_page_number(self, mock_chassis): + lower_page_bytearray = 
bytearray([13, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + upper_page0_bytearray = bytearray([13, 0, 35, 8, 0, 0, 0, 65, 128, 128, 245, 0, 0, 0, 0, 0, 0, 0, 1, 160, 77, 111, 108, 101, 120, 32, 73, 110, 99, 46, 32, 32, 32, 32, 32, 32, 7, 0, 9, 58, 49, 49, 49, 48, 52, 48, 49, 48, 53, 52, 32, 32, 32, 32, 32, 32, 32, 32, 3, 4, 0, 0, 70, 196, 0, 0, 0, 0, 54, 49, 49, 48, 51, 48, 57, 50, 57, 32, 32, 32, 32, 32, 32, 32, 49, 54, 48, 52, 49, 57, 32, 32, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + page10_expected_output = '''EEPROM hexdump for port Ethernet0 page 10h + Lower page 0h + 00000000 0d 00 06 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000010 00 00 00 00 00 00 01 81 00 00 00 00 00 00 00 00 |................| + 00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 0h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. 
| + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 10h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. | + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + +''' + page11_expected_output = '''EEPROM hexdump for port Ethernet0 page 11h + Lower page 0h + 00000000 0d 00 06 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000010 00 00 00 00 00 00 01 81 00 00 00 00 00 00 00 00 |................| + 00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 0h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. 
| + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 11h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. | + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + +''' + def side_effect(offset, num_bytes): + if offset == 0: + return lower_page_bytearray + else: + return upper_page0_bytearray + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.read_eeprom = MagicMock(side_effect=side_effect) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom-hexdump'], ["-p", "Ethernet0", "-n", "10"]) + assert result.exit_code == 0 + assert result.output == page10_expected_output + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom-hexdump'], ["-p", "Ethernet0", "-n", "11"]) + assert result.exit_code == 0 + assert result.output == page11_expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.platform_sfputil', 
MagicMock(is_logical_port=MagicMock(return_value=1))) From 8e5ff74f018ec44a7c84907d52c2382ad7efef3f Mon Sep 17 00:00:00 2001 From: anamehra <54692434+anamehra@users.noreply.github.com> Date: Fri, 19 Apr 2024 13:38:30 -0700 Subject: [PATCH 35/45] Revert "Revert "route_check: Skip route checks if bgp feature is not enabled"" (#3270) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Revert "Revert "route_check: Skip route checks if bgp feature is not enabled"…" This reverts commit 01ee98ec6fb11057e0db41334405c6f0f4942214. --- scripts/route_check.py | 16 ++++ tests/route_check_test.py | 9 +- tests/route_check_test_data.py | 157 ++++++++++++++++++++++++++++++++- 3 files changed, 177 insertions(+), 5 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 5349acd0afc..ee417dc49cc 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -533,6 +533,18 @@ def filter_out_standalone_tunnel_routes(namespace, routes): return updated_routes +def is_feature_bgp_enabled(namespace): + """ + Check if bgp feature is enabled or disabled. + Return True if enabled else False. 
+ """ + cfg_db = multi_asic.connect_config_db_for_ns(namespace) + feature_table = cfg_db.get_table("FEATURE") + bgp_enabled = False + if 'bgp' in feature_table: + if feature_table['bgp']["state"] == "enabled": + bgp_enabled = True + return bgp_enabled def check_frr_pending_routes(namespace): """ @@ -831,6 +843,10 @@ def main(): signal.signal(signal.SIGALRM, handler) load_db_config() + if not is_feature_bgp_enabled(namespace): + print_message(syslog.LOG_INFO, "BGP feature is disabled, exiting without checking routes!!") + return 0, None + while True: signal.alarm(TIMEOUT_SECONDS) ret, res= check_routes(namespace) diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 820f0621077..1f92b3d19ae 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -271,11 +271,14 @@ def test_timeout(self, mock_dbs, force_hang): # Test timeout ex_raised = False # Use an expected failing test case to trigger the select - set_test_case_data(TEST_DATA['2']) - + ct_data = TEST_DATA['2'] + set_test_case_data(ct_data) try: - with patch('sys.argv', [route_check.__file__.split('/')[-1]]): + with patch('sys.argv', [route_check.__file__.split('/')[-1]]), \ + patch('route_check.load_db_config', side_effect=lambda: init_db_conns(ct_data[NAMESPACE])): + ret, res = route_check.main() + except Exception as err: ex_raised = True expect = "timeout occurred" diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 50c6276f262..c5a606cb901 100644 --- a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -23,6 +23,7 @@ VNET_ROUTE_TABLE = 'VNET_ROUTE_TABLE' INTF_TABLE = 'INTF_TABLE' RT_ENTRY_TABLE = 'ASIC_STATE' +FEATURE_TABLE = 'FEATURE' SEPARATOR = ":" DEVICE_METADATA = "DEVICE_METADATA" MUX_CABLE = "MUX_CABLE" @@ -32,7 +33,17 @@ RT_ENTRY_KEY_PREFIX = 'SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest":\"' RT_ENTRY_KEY_SUFFIX = '\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000023\"}' -DEFAULT_CONFIG_DB = 
{DEVICE_METADATA: {LOCALHOST: {}}} +DEFAULT_CONFIG_DB = { + DEVICE_METADATA: { + LOCALHOST: { + } + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } + } + } TEST_DATA = { "0": { @@ -330,6 +341,11 @@ CONFIG_DB: { DEVICE_METADATA: { LOCALHOST: {"subtype": "DualToR"} + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } } }, APPL_DB: { @@ -396,6 +412,11 @@ "soc_ipv6": "fc02:1000::3/128", "state": "auto" }, + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } } }, APPL_DB: { @@ -633,6 +654,11 @@ CONFIG_DB: { DEVICE_METADATA: { LOCALHOST: {"subtype": "DualToR"} + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } } }, APPL_DB: { @@ -954,5 +980,132 @@ }, RET: -1, }, - + "22": { + DESCR: "basic good one on single asic, bgp disabled", + MULTI_ASIC: False, + NAMESPACE: [''], + ARGS: "route_check -m INFO -i 1000", + PRE: { + DEFAULTNS: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: { + } + }, + FEATURE_TABLE: { + "bgp": { + "state": "disabled" + } + } + }, + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + }, + FRR_ROUTES: { + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + }, + }, + "23": { + DESCR: "basic good one on multi-asic, bgp 
disabled", + MULTI_ASIC: True, + NAMESPACE: ['asic0'], + ARGS: "route_check -m INFO -i 1000", + PRE: { + ASIC0: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: { + } + }, + FEATURE_TABLE: { + "bgp": { + "state": "disabled" + } + } + }, + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + }, + FRR_ROUTES: { + ASIC0: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + }, + }, } From 07d6d27795c71c34e97c30faa40a24d0650330b9 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Sat, 20 Apr 2024 00:25:11 +0300 Subject: [PATCH 36/45] [fast/warm-reboot] Retain TRANSCEIVER_INFO tables on fast/warm-reboot (#3240) * [fast/warm-reboot] Retain TRANSCEIVER_INFO/STATUS tables on reboot Signed-off-by: Stepan Blyschak * Remove TRANSCEIVER_STATUS --------- Signed-off-by: Stepan Blyschak --- scripts/fast-reboot | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index f265318aa2f..91791b37714 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -252,6 +252,7 @@ function backup_database() and not string.match(k, 'MIRROR_SESSION_TABLE|') \ and not string.match(k, 'FG_ROUTE_TABLE|') \ and not 
string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \ + and not string.match(k, 'TRANSCEIVER_INFO|') \ and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \ and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \ and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then From b143ea6dc7d53d04dc68c4707c63d94760fb025a Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:54:59 -0700 Subject: [PATCH 37/45] [chassis][voq]Add fabric monitoring commands. (#3239) What I did Add a force option for config fabric port unisolate command. Add a show command to display if a up link is get isolated or not . Example output is : # show fabric isolation asic0 Local Link Auto Isolated Manual Isolated Isolated ------------ --------------- ----------------- ---------- 0 0 0 0 1 0 0 0 2 0 0 0 .... Add test for the new commands. The test is failed now as it needs this sonic-net/sonic-swss#3089 merged in first. --- config/fabric.py | 55 ++++++++++++++++++++++++++------ scripts/fabricstat | 40 +++++++++++++++++++++++ show/fabric.py | 12 +++++++ tests/config_fabric_test.py | 23 ++++++++++--- tests/fabricstat_test.py | 45 ++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 33 +++++++++++++++++++ 6 files changed, 194 insertions(+), 14 deletions(-) diff --git a/config/fabric.py b/config/fabric.py index 16ce35f7330..5c1931418f9 100644 --- a/config/fabric.py +++ b/config/fabric.py @@ -2,7 +2,10 @@ import utilities_common.cli as clicommon import utilities_common.multi_asic as multi_asic_util from sonic_py_common import multi_asic -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, APP_FABRIC_PORT_TABLE_NAME + +FABRIC_PORT_STATUS_TABLE_PREFIX = APP_FABRIC_PORT_TABLE_NAME+"|" + # # 'config fabric ...' 
@@ -66,19 +69,13 @@ def isolate(portid, namespace): # @port.command() @click.argument('portid', metavar='', required=True) +@click.option('-f', '--force', is_flag=True, default=False, help='Force to unisolate a link even if it is auto isolated.') @multi_asic_util.multi_asic_click_option_namespace -def unisolate(portid, namespace): +def unisolate(portid, namespace, force): """FABRIC PORT unisolate """ ctx = click.get_current_context() - if not portid.isdigit(): - ctx.fail("Invalid portid") - - n_asics = multi_asic.get_num_asics() - if n_asics > 1 and namespace is None: - ctx.fail('Must specify asic') - # Connect to config database config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() @@ -87,6 +84,37 @@ def unisolate(portid, namespace): state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) state_db.connect(state_db.STATE_DB, False) + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail( 'Must specify asic' ) + + # If "all" is specified then unisolate all ports. 
+ if portid == "all": + port_keys = state_db.keys(state_db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*') + for port_key in port_keys: + port_data = state_db.get_all(state_db.STATE_DB, port_key) + if "REMOTE_PORT" in port_data: + port_number = int( port_key.replace( "FABRIC_PORT_TABLE|PORT", "" ) ) + + # Make sure configuration data exists + portName = f'Fabric{port_number}' + portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName) + if not bool( portConfigData ): + ctx.fail( "Fabric monitor configuration data not present" ) + + # Update entry + config_db.mod_entry( "FABRIC_PORT", portName, {'isolateStatus': False} ) + if force: + forceShutCnt = int( portConfigData['forceUnisolateStatus'] ) + forceShutCnt += 1 + config_db.mod_entry( "FABRIC_PORT", portName, + {'forceUnisolateStatus': forceShutCnt}) + + return + + if not portid.isdigit(): + ctx.fail( "Invalid portid" ) + # check if the port is actually in use portName = f'PORT{portid}' portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName) @@ -102,6 +130,15 @@ def unisolate(portid, namespace): # Update entry config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': False}) + if force: + forceShutCnt = int( portConfigData['forceUnisolateStatus'] ) + forceShutCnt += 1 + config_db.mod_entry( "FABRIC_PORT", portName, + {'forceUnisolateStatus': forceShutCnt}) + + click.echo("Force unisolate the link.") + click.echo("It will clear all fabric link monitoring status for this link!") + # # 'config fabric port monitor ...' 
# diff --git a/scripts/fabricstat b/scripts/fabricstat index 205e3170bc8..29b8ffdbe04 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -307,6 +307,40 @@ class FabricReachability(FabricStat): print(tabulate(body, header, tablefmt='simple', stralign='right')) return +class FabricIsolation(FabricStat): + def isolation_print(self): + # Connect to database + self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace) + # Get the set of all fabric ports + port_keys = self.db.keys(self.db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*') + # Create a new dictionary. The keys are the local port values in integer format. + # Only fabric ports that have remote port data are added. + port_dict = {} + for port_key in port_keys: + port_data = self.db.get_all(self.db.STATE_DB, port_key) + if "REMOTE_PORT" in port_data: + port_number = int(port_key.replace("FABRIC_PORT_TABLE|PORT", "")) + port_dict.update({port_number: port_data}) + # Create ordered table of fabric ports. + header = ["Local Link", "Auto Isolated", "Manual Isolated", "Isolated"] + auto_isolated = 0 + manual_isolated = 0 + isolated = 0 + body = [] + for port_number in sorted(port_dict.keys()): + port_data = port_dict[port_number] + if "AUTO_ISOLATED" in port_data: + auto_isolated = port_data["AUTO_ISOLATED"] + if "CONFIG_ISOLATED" in port_data: + manual_isolated = port_data["CONFIG_ISOLATED"] + if "ISOLATED" in port_data: + isolated = port_data["ISOLATED"] + body.append((port_number, auto_isolated, manual_isolated, isolated)); + if self.namespace: + print(f"\n{self.namespace}") + print(tabulate(body, header, tablefmt='simple', stralign='right')) + return + def main(): global cnstat_dir global cnstat_fqn_file_port @@ -329,12 +363,14 @@ Examples: parser.add_argument('-r','--reachability', action='store_true', help='Display reachability, otherwise port stat') parser.add_argument('-n','--namespace', default=None, help='Display fabric ports counters for specific namespace') parser.add_argument('-e', 
'--errors', action='store_true', help='Display errors') + parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status') parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters') parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats') args = parser.parse_args() queue = args.queue reachability = args.reachability + isolation_status = args.isolation namespace = args.namespace errors_only = args.errors @@ -362,6 +398,10 @@ Examples: stat = FabricReachability(ns) stat.reachability_print() return + elif isolation_status: + stat = FabricIsolation(ns) + stat.isolation_print() + return else: stat = FabricPortStat(ns) cnstat_dict = stat.get_cnstat_dict() diff --git a/show/fabric.py b/show/fabric.py index c8dc956e44a..c67a28ac155 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -13,6 +13,18 @@ def counters(): """Show fabric port counters""" pass +@fabric.group(invoke_without_command=True) +@multi_asic_util.multi_asic_click_option_namespace +@click.option('-e', '--errors', is_flag=True) +def isolation(namespace, errors): + """Show fabric isolation status""" + cmd = ['fabricstat', '-i'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if errors: + cmd += ["-e"] + clicommon.run_command(cmd) + @fabric.group(invoke_without_command=True) @multi_asic_util.multi_asic_click_option_namespace @click.option('-e', '--errors', is_flag=True) diff --git a/tests/config_fabric_test.py b/tests/config_fabric_test.py index ca8a8b8a097..a535781facc 100644 --- a/tests/config_fabric_test.py +++ b/tests/config_fabric_test.py @@ -42,22 +42,35 @@ def test_config_isolation(self, ctx): expect_result = 0 assert operator.eq(result.exit_code, expect_result) - # Issue command "config fabric port isolate 1", - # check if the result has the error message as port 1 is not in use. 
- result = self.basic_check("port", ["isolate", "1"], ctx) - assert "Port 1 is not in use" in result.output - # Issue command "config fabric port unisolate 0", # check if the result is expected. result = self.basic_check("port", ["unisolate", "0"], ctx) expect_result = 0 assert operator.eq(result.exit_code, expect_result) + # Issue command "config fabric port unisolate 0", + # check if the result is expected. + result = self.basic_check("port", ["unisolate", "0", "--force"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + assert "Force unisolate the link" in result.output + + # Issue command "config fabric port isolate 1", + # check if the result has the error message as port 1 is not in use. + result = self.basic_check("port", ["isolate", "1"], ctx) + assert "Port 1 is not in use" in result.output + # Issue command "config fabric port unisolate 1", # check if the result has the error message as port 1 is not in use. result = self.basic_check("port", ["unisolate", "1"], ctx) assert "Port 1 is not in use" in result.output + # Issue command "config fabric port unisolate all -n asic1" + # check if the result has the warning message + result = self.basic_check("port", ["unisolate", "all", "--force"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + def test_config_fabric_monitor_threshold(self, ctx): # Issue command "config fabric port monitor error threshold <#> <#>" # with an out of range number, check if the result has the error message. 
diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index 625c1d14a0d..cc06701d817 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -151,6 +151,37 @@ 7 0 93 up """ +multi_asic_fabric_isolation = """\ + +asic0 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 2 0 0 0 + 4 0 0 0 + 6 0 0 0 + 7 0 0 0 + +asic1 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 4 0 0 0 +""" + +multi_asic_fabric_isolation_asic0 = """\ + +asic0 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 2 0 0 0 + 4 0 0 0 + 6 0 0 0 + 7 0 0 0 +""" + + class TestFabricStat(object): @classmethod def setup_class(cls): @@ -271,6 +302,20 @@ def test_multi_show_fabric_reachability_asic(self): assert return_code == 0 assert result == multi_asic_fabric_reachability_asic0 + def test_multi_show_fabric_isolation(self): + return_code, result = get_result_and_return_code(['fabricstat', '-i']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_isolation + + def test_multi_show_fabric_isolation_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-i', '-n', 'asic0']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_isolation_asic0 + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 5cd30c986cc..edff84e5547 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -2710,19 +2710,52 @@ }, "FABRIC_PORT|Fabric0": { "alias": "Fabric0", + "forceUnisolateStatus": "0", "isolateStatus": "False", "lanes": "0" }, "FABRIC_PORT|Fabric1": { 
"alias": "Fabric1", + "forceUnisolateStatus": "0", "isolateStatus": "False", "lanes": "1" }, "FABRIC_PORT|Fabric2": { "alias": "Fabric2", + "forceUnisolateStatus": "0", "isolateStatus": "False", "lanes": "2" }, + "FABRIC_PORT|Fabric3": { + "alias": "Fabric3", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "3" + }, + "FABRIC_PORT|Fabric4": { + "alias": "Fabric4", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "4" + }, + "FABRIC_PORT|Fabric5": { + "alias": "Fabric5", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "5" + }, + "FABRIC_PORT|Fabric6": { + "alias": "Fabric6", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "6" + }, + "FABRIC_PORT|Fabric7": { + "alias": "Fabric7", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "7" + }, "DHCP_RELAY|Vlan1000": { "dhcpv6_servers": [ "fc02:2000::1" From d48a83083437bf0996aead44e572b2fbd1b4f4db Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:47:49 -0700 Subject: [PATCH 38/45] Add Multi ASIC support for apply-patch (#3249) * Add Multi ASIC support for apply-patch * Add more test cases. * Ignore mock diff exception * Address comments. 
* Fix errors * Add empty case handle * Refactor extract scope * Fix UT * Fix extract for single asic * Adding localhost into log if scope is empty * Fix format in log * Fix log * Fix log * Fix variable --- config/main.py | 117 ++++++++---- generic_config_updater/change_applier.py | 44 +++-- generic_config_updater/generic_updater.py | 167 +++++++++++------ generic_config_updater/gu_common.py | 35 ++-- tests/config_test.py | 133 ++++++++++++-- .../change_applier_test.py | 32 +++- .../multiasic_change_applier_test.py | 172 ++++++++++++++++++ .../multiasic_generic_updater_test.py | 167 +++++++++++++++++ 8 files changed, 726 insertions(+), 141 deletions(-) create mode 100644 tests/generic_config_updater/multiasic_change_applier_test.py create mode 100644 tests/generic_config_updater/multiasic_generic_updater_test.py diff --git a/config/main.py b/config/main.py index a068a1b7f4d..8f3b7245bd5 100644 --- a/config/main.py +++ b/config/main.py @@ -19,7 +19,7 @@ from jsonpatch import JsonPatchConflict from jsonpointer import JsonPointerException from collections import OrderedDict -from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat +from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat, extract_scope from minigraph import parse_device_desc_xml, minigraph_encoder from natsort import natsorted from portconfig import get_child_ports @@ -1152,6 +1152,24 @@ def validate_gre_type(ctx, _, value): return gre_type_value except ValueError: raise click.UsageError("{} is not a valid GRE type".format(value)) + +# Function to apply patch for a single ASIC. 
+def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path): + scope, changes = scope_changes + # Replace localhost to DEFAULT_NAMESPACE which is db definition of Host + if scope.lower() == "localhost" or scope == "": + scope = multi_asic.DEFAULT_NAMESPACE + + scope_for_log = scope if scope else "localhost" + try: + # Call apply_patch with the ASIC-specific changes and predefined parameters + GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + results[scope_for_log] = {"success": True, "message": "Success"} + log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}") + except Exception as e: + results[scope_for_log] = {"success": False, "message": str(e)} + log.log_error(f"'apply-patch' executed failed for {scope_for_log} by {changes} due to {str(e)}") + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @@ -1357,12 +1375,47 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i patch_as_json = json.loads(text) patch = jsonpatch.JsonPatch(patch_as_json) + results = {} config_format = ConfigFormat[format.upper()] - GenericUpdater().apply_patch(patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + # Initialize a dictionary to hold changes categorized by scope + changes_by_scope = {} + + # Iterate over each change in the JSON Patch + for change in patch: + scope, modified_path = extract_scope(change["path"]) + + # Modify the 'path' in the change to remove the scope + change["path"] = modified_path + + # Check if the scope is already in our dictionary, if not, initialize it + if scope not in changes_by_scope: + changes_by_scope[scope] = [] + # Add the modified change to the appropriate list based on scope + changes_by_scope[scope].append(change) + + 
# Empty case to force validate YANG model. + if not changes_by_scope: + asic_list = [multi_asic.DEFAULT_NAMESPACE] + asic_list.extend(multi_asic.get_namespace_list()) + for asic in asic_list: + changes_by_scope[asic] = [] + + # Apply changes for each scope + for scope_changes in changes_by_scope.items(): + apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + + # Check if any updates failed + failures = [scope for scope, result in results.items() if not result['success']] + + if failures: + failure_messages = '\n'.join([f"- {failed_scope}: {results[failed_scope]['message']}" for failed_scope in failures]) + raise Exception(f"Failed to apply patch on the following scopes:\n{failure_messages}") + + log.log_notice(f"Patch applied successfully for {patch}.") click.secho("Patch applied successfully.", fg="cyan", underline=True) except Exception as ex: - click.secho("Failed to apply patch", fg="red", underline=True, err=True) + click.secho("Failed to apply patch due to: {}".format(ex), fg="red", underline=True, err=True) ctx.fail(ex) @config.command() @@ -2078,7 +2131,7 @@ def synchronous_mode(sync_mode): if ADHOC_VALIDATION: if sync_mode != 'enable' and sync_mode != 'disable': raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode) - + config_db = ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() try: @@ -2086,7 +2139,7 @@ def synchronous_mode(sync_mode): except ValueError as e: ctx = click.get_current_context() ctx.fail("Error: Invalid argument %s, expect either enable or disable" % sync_mode) - + click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n Option 1. 
config save -y \n config reload -y \n @@ -2152,7 +2205,7 @@ def portchannel(db, ctx, namespace): @click.pass_context def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate): """Add port channel""" - + fvs = { 'admin_status': 'up', 'mtu': '9100', @@ -2164,7 +2217,7 @@ def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate): fvs['min_links'] = str(min_links) if fallback != 'false': fvs['fallback'] = 'true' - + db = ValidatedConfigDBConnector(ctx.obj['db']) if ADHOC_VALIDATION: if is_portchannel_name_valid(portchannel_name) != True: @@ -2172,18 +2225,18 @@ def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate): .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) if is_portchannel_present_in_db(db, portchannel_name): ctx.fail("{} already exists!".format(portchannel_name)) # TODO: MISSING CONSTRAINT IN YANG MODEL - + try: db.set_entry('PORTCHANNEL', portchannel_name, fvs) except ValueError: ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'".format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) - + @portchannel.command('del') @click.argument('portchannel_name', metavar='', required=True) @click.pass_context def remove_portchannel(ctx, portchannel_name): """Remove port channel""" - + db = ValidatedConfigDBConnector(ctx.obj['db']) if ADHOC_VALIDATION: if is_portchannel_name_valid(portchannel_name) != True: @@ -2201,7 +2254,7 @@ def remove_portchannel(ctx, portchannel_name): if len([(k, v) for k, v in db.get_table('PORTCHANNEL_MEMBER') if k == portchannel_name]) != 0: # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("Error: Portchannel {} contains members. 
Remove members before deleting Portchannel!".format(portchannel_name)) - + try: db.set_entry('PORTCHANNEL', portchannel_name, None) except JsonPatchConflict: @@ -2219,7 +2272,7 @@ def portchannel_member(ctx): def add_portchannel_member(ctx, portchannel_name, port_name): """Add member to port channel""" db = ValidatedConfigDBConnector(ctx.obj['db']) - + if ADHOC_VALIDATION: if clicommon.is_port_mirror_dst_port(db, port_name): ctx.fail("{} is configured as mirror destination port".format(port_name)) # TODO: MISSING CONSTRAINT IN YANG MODEL @@ -2236,7 +2289,7 @@ def add_portchannel_member(ctx, portchannel_name, port_name): # Dont proceed if the port channel does not exist if is_portchannel_present_in_db(db, portchannel_name) is False: ctx.fail("{} is not present.".format(portchannel_name)) - + # Don't allow a port to be member of port channel if it is configured with an IP address for key,value in db.get_table('INTERFACE').items(): if type(key) == tuple: @@ -2274,7 +2327,7 @@ def add_portchannel_member(ctx, portchannel_name, port_name): member_port_speed = member_port_entry.get(PORT_SPEED) port_speed = port_entry.get(PORT_SPEED) # TODO: MISSING CONSTRAINT IN YANG MODEL - if member_port_speed != port_speed: + if member_port_speed != port_speed: ctx.fail("Port speed of {} is different than the other members of the portchannel {}" .format(port_name, portchannel_name)) @@ -2347,7 +2400,7 @@ def del_portchannel_member(ctx, portchannel_name, port_name): # Dont proceed if the the port is not an existing member of the port channel if not is_port_member_of_this_portchannel(db, port_name, portchannel_name): ctx.fail("{} is not a member of portchannel {}".format(port_name, portchannel_name)) - + try: db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None) except JsonPatchConflict: @@ -2534,7 +2587,7 @@ def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer if not namespaces['front_ns']: config_db = 
ValidatedConfigDBConnector(ConfigDBConnector()) config_db.connect() - if ADHOC_VALIDATION: + if ADHOC_VALIDATION: if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: return try: @@ -3504,7 +3557,7 @@ def del_community(db, community): if community not in snmp_communities: click.echo("SNMP community {} is not configured".format(community)) sys.exit(1) - + config_db = ValidatedConfigDBConnector(db.cfgdb) try: config_db.set_entry('SNMP_COMMUNITY', community, None) @@ -4562,7 +4615,7 @@ def fec(ctx, interface_name, interface_fec, verbose): def ip(ctx): """Set IP interface attributes""" pass - + def validate_vlan_exists(db,text): data = db.get_table('VLAN') keys = list(data.keys()) @@ -4630,12 +4683,12 @@ def add(ctx, interface_name, ip_addr, gw): table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") - + if table_name == "VLAN_INTERFACE": if not validate_vlan_exists(config_db, interface_name): ctx.fail(f"Error: {interface_name} does not exist. 
Vlan must be created before adding an IP address") return - + interface_entry = config_db.get_entry(table_name, interface_name) if len(interface_entry) == 0: if table_name == "VLAN_SUB_INTERFACE": @@ -5057,7 +5110,7 @@ def cable_length(ctx, interface_name, length): if not is_dynamic_buffer_enabled(config_db): ctx.fail("This command can only be supported on a system with dynamic buffer enabled") - + if ADHOC_VALIDATION: # Check whether port is legal ports = config_db.get_entry("PORT", interface_name) @@ -5402,7 +5455,7 @@ def unbind(ctx, interface_name): config_db.set_entry(table_name, interface_name, subintf_entry) else: config_db.set_entry(table_name, interface_name, None) - + click.echo("Interface {} IP disabled and address(es) removed due to unbinding VRF.".format(interface_name)) # # 'ipv6' subgroup ('config interface ipv6 ...') @@ -6580,7 +6633,7 @@ def add_loopback(ctx, loopback_name): lo_intfs = [k for k, v in config_db.get_table('LOOPBACK_INTERFACE').items() if type(k) != tuple] if loopback_name in lo_intfs: ctx.fail("{} already exists".format(loopback_name)) # TODO: MISSING CONSTRAINT IN YANG VALIDATION - + try: config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"}) except ValueError: @@ -6604,7 +6657,7 @@ def del_loopback(ctx, loopback_name): ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ] for ip in ips: config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None) - + try: config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None) except JsonPatchConflict: @@ -6662,9 +6715,9 @@ def ntp(ctx): def add_ntp_server(ctx, ntp_ip_address): """ Add NTP server IP """ if ADHOC_VALIDATION: - if not clicommon.is_ipaddress(ntp_ip_address): + if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid IP address') - db = ValidatedConfigDBConnector(ctx.obj['db']) + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: 
click.echo("NTP server {} is already configured".format(ntp_ip_address)) @@ -6675,7 +6728,7 @@ def add_ntp_server(ctx, ntp_ip_address): {'resolve_as': ntp_ip_address, 'association_type': 'server'}) except ValueError as e: - ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") @@ -6691,7 +6744,7 @@ def del_ntp_server(ctx, ntp_ip_address): if ADHOC_VALIDATION: if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid IP address') - db = ValidatedConfigDBConnector(ctx.obj['db']) + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: try: @@ -7019,19 +7072,19 @@ def add(ctx, name, ipaddr, port, vrf): if not is_valid_collector_info(name, ipaddr, port, vrf): return - config_db = ValidatedConfigDBConnector(ctx.obj['db']) + config_db = ValidatedConfigDBConnector(ctx.obj['db']) collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2): click.echo("Only 2 collectors can be configured, please delete one") return - + try: config_db.mod_entry('SFLOW_COLLECTOR', name, {"collector_ip": ipaddr, "collector_port": port, "collector_vrf": vrf}) except ValueError as e: - ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) return # @@ -7364,7 +7417,7 @@ def add_subinterface(ctx, subinterface_name, vid): if vid is not None: subintf_dict.update({"vlan" : vid}) subintf_dict.update({"admin_status" : "up"}) - + try: config_db.set_entry('VLAN_SUB_INTERFACE', subinterface_name, subintf_dict) except ValueError as e: diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py index d0818172f8f..32a356bf9ae 100644 --- a/generic_config_updater/change_applier.py +++ b/generic_config_updater/change_applier.py @@ -1,12 +1,14 @@ import copy import json +import subprocess import jsondiff import importlib import os import tempfile from collections import defaultdict from swsscommon.swsscommon import ConfigDBConnector -from .gu_common import genericUpdaterLogging +from sonic_py_common import multi_asic +from .gu_common import GenericConfigUpdaterError, genericUpdaterLogging SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json" @@ -32,12 +34,11 @@ def log_error(m): logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console) -def get_config_db(): - config_db = ConfigDBConnector() +def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE): + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() return config_db - def set_config(config_db, tbl, key, data): config_db.set_entry(tbl, key, data) @@ -73,8 +74,9 @@ class ChangeApplier: updater_conf = None - def __init__(self): - self.config_db = get_config_db() + def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace + self.config_db = get_config_db(self.namespace) self.backend_tables = [ "BUFFER_PG", "BUFFER_PROFILE", @@ -160,18 +162,32 @@ def apply(self, change): log_error("Failed to apply Json change") return ret - def remove_backend_tables_from_config(self, data): for key in self.backend_tables: data.pop(key, None) - def _get_running_config(self): - (_, 
fname) = tempfile.mkstemp(suffix="_changeApplier") - os.system("sonic-cfggen -d --print-data > {}".format(fname)) - run_data = {} - with open(fname, "r") as s: - run_data = json.load(s) - if os.path.isfile(fname): + _, fname = tempfile.mkstemp(suffix="_changeApplier") + + if self.namespace: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + + with open(fname, "w") as file: + result = subprocess.Popen(cmd, stdout=file, stderr=subprocess.PIPE, text=True) + _, err = result.communicate() + + return_code = result.returncode + if return_code: os.remove(fname) + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + + run_data = {} + try: + with open(fname, "r") as file: + run_data = json.load(file) + finally: + if os.path.isfile(fname): + os.remove(fname) return run_data diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index f9aab823365..b75939749ce 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,4 +1,5 @@ import json +import jsonpointer import os from enum import Enum from .gu_common import GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ @@ -6,10 +7,37 @@ from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier +from sonic_py_common import multi_asic CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" +def extract_scope(path): + if not path: + raise Exception("Wrong patch with empty path.") + + try: + pointer = jsonpointer.JsonPointer(path) + parts = pointer.parts + except Exception as e: + raise Exception(f"Error resolving path: '{path}' due to {e}") + + if not parts: + raise Exception("Wrong patch with empty 
path.") + if parts[0].startswith("asic"): + if not parts[0][len("asic"):].isnumeric(): + raise Exception(f"Error resolving path: '{path}' due to incorrect ASIC number.") + scope = parts[0] + remainder = "/" + "/".join(parts[1:]) + elif parts[0] == "localhost": + scope = "localhost" + remainder = "/" + "/".join(parts[1:]) + else: + scope = "" + remainder = path + + return scope, remainder + class ConfigLock: def acquire_lock(self): # TODO: Implement ConfigLock @@ -29,77 +57,82 @@ def __init__(self, patchsorter=None, changeapplier=None, config_wrapper=None, - patch_wrapper=None): + patch_wrapper=None, + namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) - self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace) def apply(self, patch, sort=True): - self.logger.log_notice("Patch application starting.") - self.logger.log_notice(f"Patch: {patch}") + scope = self.namespace if self.namespace else 'localhost' + self.logger.log_notice(f"{scope}: Patch application starting.") + self.logger.log_notice(f"{scope}: Patch: {patch}") # Get old config - self.logger.log_notice("Getting current config db.") + self.logger.log_notice(f"{scope} getting current config db.") old_config = 
self.config_wrapper.get_config_db_as_json() # Generate target config - self.logger.log_notice("Simulating the target full config after applying the patch.") + self.logger.log_notice(f"{scope}: simulating the target full config after applying the patch.") target_config = self.patch_wrapper.simulate_patch(patch, old_config) - + # Validate all JsonPatch operations on specified fields - self.logger.log_notice("Validating all JsonPatch operations are permitted on the specified fields") + self.logger.log_notice(f"{scope}: validating all JsonPatch operations are permitted on the specified fields") self.config_wrapper.validate_field_operation(old_config, target_config) # Validate target config does not have empty tables since they do not show up in ConfigDb - self.logger.log_notice("Validating target config does not have empty tables, " \ + self.logger.log_notice(f"{scope}: alidating target config does not have empty tables, " \ "since they do not show up in ConfigDb.") empty_tables = self.config_wrapper.get_empty_tables(target_config) if empty_tables: # if there are empty tables empty_tables_txt = ", ".join(empty_tables) - raise EmptyTableError("Given patch is not valid because it will result in empty tables " \ + raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables " \ "which is not allowed in ConfigDb. 
" \ f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply if sort: - self.logger.log_notice("Sorting patch updates.") + self.logger.log_notice(f"{scope}: sorting patch updates.") changes = self.patchsorter.sort(patch) else: - self.logger.log_notice("Converting patch to JsonChange.") + self.logger.log_notice(f"{scope}: converting patch to JsonChange.") changes = [JsonChange(jsonpatch.JsonPatch([element])) for element in patch] - + changes_len = len(changes) - self.logger.log_notice(f"The patch was converted into {changes_len} " \ + self.logger.log_notice(f"The {scope} patch was converted into {changes_len} " \ f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") for change in changes: self.logger.log_notice(f" * {change}") # Apply changes in order - self.logger.log_notice(f"Applying {changes_len} change{'s' if changes_len != 1 else ''} " \ + self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \ f"in order{':' if changes_len > 0 else '.'}") for change in changes: self.logger.log_notice(f" * {change}") self.changeapplier.apply(change) # Validate config updated successfully - self.logger.log_notice("Verifying patch updates are reflected on ConfigDB.") + self.logger.log_notice(f"{scope}: verifying patch updates are reflected on ConfigDB.") new_config = self.config_wrapper.get_config_db_as_json() self.changeapplier.remove_backend_tables_from_config(target_config) self.changeapplier.remove_backend_tables_from_config(new_config) if not(self.patch_wrapper.verify_same_json(target_config, new_config)): - raise GenericConfigUpdaterError(f"After applying patch to config, there are still some parts not updated") + raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated") + + self.logger.log_notice(f"{scope} patch application completed.") - self.logger.log_notice("Patch application completed.") class 
ConfigReplacer: - def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None): + def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True) - self.patch_applier = patch_applier if patch_applier is not None else PatchApplier() - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() + self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) def replace(self, target_config): self.logger.log_notice("Config replacement starting.") @@ -122,15 +155,18 @@ def replace(self, target_config): self.logger.log_notice("Config replacement completed.") + class FileSystemConfigRollbacker: def __init__(self, checkpoints_dir=CHECKPOINTS_DIR, config_replacer=None, - config_wrapper=None): + config_wrapper=None, + namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True) self.checkpoints_dir = checkpoints_dir - self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer() - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) def rollback(self, checkpoint_name): self.logger.log_notice("Config 
rollbacking starting.") @@ -168,7 +204,7 @@ def checkpoint(self, checkpoint_name): def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") - + self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") if not self._checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") @@ -236,12 +272,13 @@ def _delete_checkpoint(self, name): path = self._get_checkpoint_full_path(name) return os.remove(path) + class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None): + def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self) - ConfigReplacer.__init__(self) - FileSystemConfigRollbacker.__init__(self) + PatchApplier.__init__(self, namespace=namespace) + ConfigReplacer.__init__(self, namespace=namespace) + FileSystemConfigRollbacker.__init__(self, namespace=namespace) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer @@ -265,10 +302,12 @@ def list_checkpoints(self): def delete_checkpoint(self, checkpoint_name): self.decorated_config_rollbacker.delete_checkpoint(checkpoint_name) + class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer) + def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) + self.namespace = namespace self.patch_wrapper = 
patch_wrapper self.config_wrapper = config_wrapper @@ -280,13 +319,15 @@ def replace(self, target_config): config_db_target_config = self.config_wrapper.convert_sonic_yang_to_config_db(target_config) Decorator.replace(self, config_db_target_config) + class ConfigLockDecorator(Decorator): def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, - config_lock = ConfigLock()): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker) + config_lock=ConfigLock(), + namespace=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) self.config_lock = config_lock @@ -307,28 +348,35 @@ def execute_write_action(self, action, *args): action(*args) self.config_lock.release_lock() + class GenericUpdateFactory: + def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace + def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: - patch_applier = SonicYangDecorator( - decorated_patch_applier = patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + patch_applier = 
SonicYangDecorator(decorated_patch_applier=patch_applier, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper, + namespace=self.namespace) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier = patch_applier) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) return patch_applier @@ -337,24 +385,27 @@ def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yan config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper) + config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: - config_replacer = SonicYangDecorator( - decorated_config_replacer = config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper, + namespace=self.namespace) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer = config_replacer) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, 
namespace=self.namespace) return config_replacer @@ -363,18 +414,19 @@ def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_table config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper = config_wrapper, config_replacer = config_replacer) + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker = config_rollbacker) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) return config_rollbacker @@ -383,15 +435,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper() + return DryRunConfigWrapper(namespace=self.namespace) else: - return ConfigWrapper() + return ConfigWrapper(namespace=self.namespace) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier() + return ChangeApplier(namespace=self.namespace) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not 
ignore_non_yang_tables and not ignore_paths: @@ -408,10 +460,11 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, return NonStrictPatchSorter(config_wrapper, patch_wrapper, config_splitter) + class GenericUpdater: - def __init__(self, generic_update_factory=None): + def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory() + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index a6cb8de0944..974c540c07a 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -9,7 +9,7 @@ import copy import re import os -from sonic_py_common import logger +from sonic_py_common import logger, multi_asic from enum import Enum YANG_DIR = "/usr/local/yang-models" @@ -52,7 +52,8 @@ def __eq__(self, other): return False class ConfigWrapper: - def __init__(self, yang_dir = YANG_DIR): + def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None @@ -63,13 +64,16 @@ def get_config_db_as_json(self): return config_db_json def _get_config_db_as_text(self): - # TODO: Getting configs from CLI is very slow, need to get it from sonic-cffgen directly - cmd = "show runningconfiguration all" - result = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: + 
cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + + result = subprocess.Popen(cmd, shell=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) text, err = result.communicate() return_code = result.returncode if return_code: # non-zero means failure - raise GenericConfigUpdaterError(f"Failed to get running config, Return code: {return_code}, Error: {err}") + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") return text def get_sonic_yang_as_json(self): @@ -147,12 +151,12 @@ def validate_config_db_config(self, config_db_as_json): def validate_field_operation(self, old_config, target_config): """ - Some fields in ConfigDB are restricted and may not allow third-party addition, replacement, or removal. - Because YANG only validates state and not transitions, this method helps to JsonPatch operations/transitions for the specified fields. + Some fields in ConfigDB are restricted and may not allow third-party addition, replacement, or removal. + Because YANG only validates state and not transitions, this method helps to JsonPatch operations/transitions for the specified fields. 
""" patch = jsonpatch.JsonPatch.from_diff(old_config, target_config) - - # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation + + # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation illegal_operations_to_fields_map = { 'add':[], 'replace': [], @@ -180,7 +184,7 @@ def _invoke_validating_function(cmd, jsonpatch_element): with open(GCU_FIELD_OP_CONF_FILE, "r") as s: gcu_field_operation_conf = json.load(s) else: - raise GenericConfigUpdaterError("GCU field operation validators config file not found") + raise GenericConfigUpdaterError("GCU field operation validators config file not found") for element in patch: path = element["path"] @@ -296,8 +300,8 @@ def create_sonic_yang_with_loaded_models(self): class DryRunConfigWrapper(ConfigWrapper): # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. - def __init__(self, initial_imitated_config_db = None): - super().__init__() + def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE): + super().__init__(namespace=namespace) self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True) self.imitated_config_db = copy.deepcopy(initial_imitated_config_db) @@ -317,8 +321,9 @@ def _init_imitated_config_db_if_none(self): class PatchWrapper: - def __init__(self, config_wrapper=None): - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.namespace) self.path_addressing = PathAddressing(self.config_wrapper) def validate_config_db_patch_has_yang_models(self, patch): diff --git a/tests/config_test.py b/tests/config_test.py index cc0ac22e986..1054a52a33f 100644 --- 
a/tests/config_test.py +++ b/tests/config_test.py @@ -15,7 +15,7 @@ import click from click.testing import CliRunner -from sonic_py_common import device_info +from sonic_py_common import device_info, multi_asic from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -1699,7 +1699,7 @@ def test_config_load_mgmt_config_ipv6_only(self, get_cmd_module, setup_single_br } } self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv6_only_output, 7) - + def test_config_load_mgmt_config_ipv4_ipv6(self, get_cmd_module, setup_single_broadcom_asic): device_desc_result = { 'DEVICE_METADATA': { @@ -1931,19 +1931,19 @@ def test_warm_restart_neighsyncd_timer_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. Error" in result.output - + def test_warm_restart_neighsyncd_timer(self): config.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["warm_restart"].commands["neighsyncd_timer"], ["0"], obj=obj) print(result.exit_code) print(result.output) assert result.exit_code != 0 assert "neighsyncd warm restart timer must be in range 1-9999" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_bgp_timer_yang_validation(self): @@ -1957,7 +1957,7 @@ def test_warm_restart_bgp_timer_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. 
Error" in result.output - + def test_warm_restart_bgp_timer(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -1969,7 +1969,7 @@ def test_warm_restart_bgp_timer(self): print(result.output) assert result.exit_code != 0 assert "bgp warm restart timer must be in range 1-3600" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_teamsyncd_timer_yang_validation(self): @@ -1995,7 +1995,7 @@ def test_warm_restart_teamsyncd_timer(self): print(result.output) assert result.exit_code != 0 assert "teamsyncd warm restart timer must be in range 1-3600" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_bgp_eoiu_yang_validation(self): @@ -2052,7 +2052,7 @@ def test_add_cablelength_invalid_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. 
Error" in result.output - + @patch("config.main.ConfigDBConnector.get_entry", mock.Mock(return_value="Port Info")) @patch("config.main.is_dynamic_buffer_enabled", mock.Mock(return_value=True)) def test_add_cablelength_with_invalid_name_invalid_length(self): @@ -2078,7 +2078,7 @@ def setup_class(cls): print("SETUP") import config.main importlib.reload(config.main) - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) def test_add_loopback_with_invalid_name_yang_validation(self): @@ -2116,7 +2116,7 @@ def test_del_nonexistent_loopback_adhoc_validation(self): print(result.output) assert result.exit_code != 0 assert "Loopback12 does not exist" in result.output - + def test_del_nonexistent_loopback_adhoc_validation(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -2128,7 +2128,7 @@ def test_del_nonexistent_loopback_adhoc_validation(self): print(result.output) assert result.exit_code != 0 assert "Loopbax1 is invalid, name should have prefix 'Loopback' and suffix '<0-999>'" in result.output - + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(return_value=True)) @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) def test_add_loopback_yang_validation(self): @@ -2152,7 +2152,7 @@ def test_add_loopback_adhoc_validation(self): print(result.exit_code) print(result.output) assert result.exit_code == 0 - + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -2635,3 +2635,110 @@ def test_date_bad(self): @classmethod def teardown_class(cls): print('TEARDOWN') + + +class TestApplyPatchMultiAsic(unittest.TestCase): + def setUp(self): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import 
config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + self.runner = CliRunner() + self.patch_file_path = 'path/to/patch.json' + self.patch_content = [ + { + "op": "add", + "path": "/localhost/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "add", + "path": "/asic0/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet3", "Ethernet4"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "replace", + "path": "/asic1/PORT/Ethernet1/mtu", + "value": "9200" + } + ] + + def test_apply_patch_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path], catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to 
avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_database_config() \ No newline at end of file diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index afe166b008f..4c9b33c3a4d 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -74,16 +74,28 @@ def debug_print(msg): # Mimics os.system call for sonic-cfggen -d --print-data > filename -# -def 
os_system_cfggen(cmd): +def subprocess_Popen_cfggen(cmd, *args, **kwargs): global running_config - fname = cmd.split(">")[-1].strip() + # Extract file name from kwargs if 'stdout' is a file object + stdout = kwargs.get('stdout') + if hasattr(stdout, 'name'): + fname = stdout.name + else: + raise ValueError("stdout is not a file") + + # Write the running configuration to the file specified in stdout with open(fname, "w") as s: - s.write(json.dumps(running_config, indent=4)) - debug_print("File created {} type={} cfg={}".format(fname, - type(running_config), json.dumps(running_config)[1:40])) - return 0 + json.dump(running_config, s, indent=4) + + class MockPopen: + def __init__(self): + self.returncode = 0 # Simulate successful command execution + + def communicate(self): + return "", "" # Simulate empty stdout and stderr + + return MockPopen() # mimics config_db.set_entry @@ -213,14 +225,14 @@ def vlan_validate(old_cfg, new_cfg, keys): class TestChangeApplier(unittest.TestCase): - @patch("generic_config_updater.change_applier.os.system") + @patch("generic_config_updater.change_applier.subprocess.Popen") @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") - def test_change_apply(self, mock_set, mock_db, mock_os_sys): + def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): global read_data, running_config, json_changes, json_change_index global start_running_config - mock_os_sys.side_effect = os_system_cfggen + mock_subprocess_Popen.side_effect = subprocess_Popen_cfggen mock_db.return_value = DB_HANDLE mock_set.side_effect = set_entry diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py new file mode 100644 index 00000000000..e8b277618f1 --- /dev/null +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -0,0 +1,172 @@ +import unittest +from importlib import reload +from 
unittest.mock import patch, MagicMock +from generic_config_updater.generic_updater import extract_scope +import generic_config_updater.change_applier +import generic_config_updater.services_validator +import generic_config_updater.gu_common + + +class TestMultiAsicChangeApplier(unittest.TestCase): + + def test_extract_scope(self): + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), + "/sometable/data": (True, "", "/sometable/data"), + "": (False, "", ""), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic77": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception as e: + 
assert(result == False) + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to return some running configuration + mock_get_running_config.return_value = { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + # Instantiate ChangeApplier with the default namespace + applier = generic_config_updater.change_applier.ChangeApplier() + + # Prepare a change object or data that applier.apply would use + change = MagicMock() + + # Call the apply method with the change object + applier.apply(change) + + # Assert ConfigDBConnector called with the correct namespace + mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="") + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to return some running configuration + mock_get_running_config.return_value = { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show 
table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + # Instantiate ChangeApplier with the default namespace + applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + + # Prepare a change object or data that applier.apply would use + change = MagicMock() + + # Call the apply method with the change object + applier.apply(change) + + # Assert ConfigDBConnector called with the correct namespace + mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to return some running configuration + mock_get_running_config.side_effect = Exception("Failed to get running config") + # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment + namespace = "asic0" + applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace) + + # Prepare a change object or data that applier.apply would use + change = MagicMock() + + # Test the behavior when os.system fails + with self.assertRaises(Exception) as context: + applier.apply(change) + + self.assertTrue('Failed to get running config' in str(context.exception)) + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def 
test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to simulate configuration where crucial tables are unexpectedly empty + mock_get_running_config.return_value = { + "tables": { + # Simulate empty tables or missing crucial configuration + }, + "services": { + # Normally, services would be listed here + } + } + + # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment + applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + + # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables + change = MagicMock() + + # Apply the patch + try: + assert(applier.apply(change) != 0) + except Exception: + pass diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py new file mode 100644 index 00000000000..4a55eb98be3 --- /dev/null +++ b/tests/generic_config_updater/multiasic_generic_updater_test.py @@ -0,0 +1,167 @@ +import json +import jsonpatch +import unittest +from unittest.mock import patch, MagicMock + +import generic_config_updater.change_applier +import generic_config_updater.generic_updater +import generic_config_updater.services_validator +import generic_config_updater.gu_common + +# import sys +# sys.path.insert(0,'../../generic_config_updater') +# import generic_updater as gu + +class TestMultiAsicPatchApplier(unittest.TestCase): + + @patch('generic_config_updater.gu_common.ConfigWrapper.get_empty_tables', return_value=[]) + @patch('generic_config_updater.gu_common.ConfigWrapper.get_config_db_as_json') + @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch') + @patch('generic_config_updater.generic_updater.ChangeApplier') + def 
test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_patch, mock_get_config, mock_get_empty_tables): + namespace = "asic0" + patch_data = jsonpatch.JsonPatch([ + { + "op": "add", + "path": "/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet1/mtu", + "value": "9200" + } + ]) + + original_config = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + applied_config = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + }, + "NEW_ACL_TABLE": { + "policy_desc": "New ACL Table", + "ports": [ + "Ethernet1", + "Ethernet2" + ], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9200", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + mock_get_config.side_effect = [ + original_config, + original_config, + original_config, + applied_config + ] + + mock_simulate_patch.return_value = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": [ + "Ethernet1", "Ethernet2" + ], + "stage": 
"ingress", + "type": "L3" + }, + "NEW_ACL_TABLE": { + "policy_desc": "New ACL Table", + "ports": [ + "Ethernet1", + "Ethernet2" + ], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9200", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace) + + # Apply the patch and verify + patch_applier.apply(patch_data) + + # Assertions to ensure the namespace is correctly used in underlying calls + mock_ChangeApplier.assert_called_once_with(namespace=namespace) From cd5c05804d4c0e71e7b8f61e10d0662776cfb072 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:03:53 +0800 Subject: [PATCH 39/45] Fix db_migrate.py show error and back trace while loading configuration on Linecard (#3257) Fix db_migrate.py show error and back trace while loading configuration on Linecard #### Why I did it Fix [issue @](https://github.com/sonic-net/sonic-buildimage/issues/18389) #### How I did it Revert code change by https://github.com/sonic-net/sonic-utilities/pull/3100 Check DB config initialize state and ignore when initialized. #### How to verify it Pass all UT. Manually test. 
##### Work item tracking - Microsoft ADO **(number only)**: 27384235 #### Which release branch to backport (provide reason below if selected) N/A #### Description for the changelog Fix db_migrate.py show error and back trace while loading configuration on Linecard #### A picture of a cute animal (not mandatory but encouraged) --- scripts/db_migrator.py | 12 +++++++++--- tests/db_migrator_test.py | 18 +++++++++++++++++- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index c4d4e2da9c9..79bf0490248 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -8,10 +8,9 @@ import re from sonic_py_common import device_info, logger -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig from minigraph import parse_xml from utilities_common.helper import update_config -from utilities_common.general import load_db_config INIT_CFG_FILE = '/etc/sonic/init_cfg.json' MINIGRAPH_FILE = '/etc/sonic/minigraph.xml' @@ -1277,7 +1276,14 @@ def main(): socket_path = args.socket namespace = args.namespace - load_db_config() + # Can't load global config base on the result of is_multi_asic(), because on multi-asic device, when db_migrate.py + # run on the local database, ASIC instance will have not created the /var/run/redis0/sonic-db/database-config.json + if args.namespace is not None: + if not SonicDBConfig.isGlobalInit(): + SonicDBConfig.initializeGlobalConfig() + else: + if not SonicDBConfig.isInit(): + SonicDBConfig.initialize() if socket_path: dbmgtr = DBMigrator(namespace, socket=socket_path) diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 21ca9148df6..97914b4aad4 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -5,7 +5,7 @@ from unittest import mock from deepdiff import DeepDiff -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon 
import SonicV2Connector, SonicDBConfig from sonic_py_common import device_info from .mock_tables import dbconnector @@ -889,6 +889,22 @@ def test_init(self, mock_args): import db_migrator db_migrator.main() + @mock.patch('argparse.ArgumentParser.parse_args') + @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) + @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) + def test_init_no_namespace(self, mock_args): + mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + import db_migrator + db_migrator.main() + + @mock.patch('argparse.ArgumentParser.parse_args') + @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) + @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) + def test_init_namespace(self, mock_args): + mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + import db_migrator + db_migrator.main() + class TestGNMIMigrator(object): @classmethod From df94636b8bc9631ae2e87a84e66770a3b792a1f7 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Thu, 25 Apr 2024 10:28:32 -0700 Subject: [PATCH 40/45] Display target firmware version through CLI (#3274) Signed-off-by: Mihir Patel --- scripts/sfpshow | 3 +++ tests/mock_tables/asic1/state_db.json | 8 +++++++- tests/mock_tables/state_db.json | 8 +++++++- tests/sfp_test.py | 18 ++++++++++++++++++ utilities_common/sfp_helper.py | 8 +++++++- 5 files changed, 42 insertions(+), 3 deletions(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 2d647176dad..b04bf516311 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -334,6 +334,9 @@ class SFPShow(object): output += covert_application_advertisement_to_output_string(indent, sfp_info_dict) elif key == 'active_firmware' or key == 'inactive_firmware': output += '{}{}: 
{}\n'.format(indent, data_map[key], sfp_firmware_info_dict[key] if key in sfp_firmware_info_dict else 'N/A') + elif key.startswith(('e1_', 'e2_')): + if key in sfp_firmware_info_dict: + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_firmware_info_dict[key]) else: output += '{}{}: {}\n'.format(indent, data_map[key], sfp_info_dict[key]) diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json index 195b8e87f37..dd76cb7f500 100644 --- a/tests/mock_tables/asic1/state_db.json +++ b/tests/mock_tables/asic1/state_db.json @@ -70,7 +70,13 @@ }, "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { "active_firmware": "X.X", - "inactive_firmware": "X.X" + "inactive_firmware": "X.X", + "e1_active_firmware" : "X.X", + "e1_inactive_firmware" : "Y.Y", + "e1_server_firmware" : "A.B.C.D", + "e2_active_firmware" : "X.X", + "e2_inactive_firmware" : "Y.Y", + "e2_server_firmware" : "A.B.C.D" }, "CHASSIS_INFO|chassis 1": { "psu_num": "2" diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index d1da74ae4bc..d6eb88c5139 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -674,7 +674,13 @@ }, "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { "active_firmware": "X.X", - "inactive_firmware": "X.X" + "inactive_firmware": "X.X", + "e1_active_firmware" : "X.X", + "e1_inactive_firmware" : "Y.Y", + "e1_server_firmware" : "A.B.C.D", + "e2_active_firmware" : "X.X", + "e2_inactive_firmware" : "Y.Y", + "e2_server_firmware" : "A.B.C.D" }, "TRANSCEIVER_INFO|Ethernet72": { "active_apsel_hostlane4": "N/A", diff --git a/tests/sfp_test.py b/tests/sfp_test.py index d1f03280fe3..51a5a1f23e3 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -600,6 +600,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + 
E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A @@ -690,6 +696,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A @@ -780,6 +792,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py index 4a6ad65b573..e38e764c9c1 100644 --- a/utilities_common/sfp_helper.py +++ b/utilities_common/sfp_helper.py @@ -38,7 +38,13 @@ 'supported_max_tx_power': 'Supported Max TX Power', 'supported_min_tx_power': 'Supported Min TX Power', 'supported_max_laser_freq': 'Supported Max Laser Frequency', - 'supported_min_laser_freq': 'Supported Min Laser Frequency' + 'supported_min_laser_freq': 'Supported Min Laser Frequency', + 'e1_active_firmware': 'E1 Active Firmware', + 'e1_inactive_firmware': 'E1 Inactive Firmware', + 'e1_server_firmware': 'E1 Server Firmware', + 'e2_active_firmware': 'E2 Active Firmware', + 'e2_inactive_firmware': 'E2 Inactive Firmware', + 'e2_server_firmware': 'E2 Server Firmware' } CMIS_DATA_MAP = {**QSFP_DATA_MAP, **QSFP_CMIS_DELTA_DATA_MAP} From 9b463ca54b4d65cb77aa733224e2bed2ec4cd22a Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:57:28 -0700 Subject: [PATCH 41/45] 
[chassis][voq] Add fabric capacity monitoring cmds (#3255) What I did Added fabric capacity monitoring related commands and the tests. To config the warning threshold ~# config fabric Usage: config fabric [OPTIONS] COMMAND [ARGS]... FABRIC-related configuration tasks Options: -h, -?, --help Show this message and exit. Commands: monitor FABRIC MONITOR configuration tasks port FABRIC PORT configuration tasks ~# config fabric monitor Usage: config fabric monitor [OPTIONS] COMMAND [ARGS]... FABRIC MONITOR configuration tasks Options: -h, -?, --help Show this message and exit. Commands: capacity FABRIC MONITOR CAPACITY configuration tasks ~# config fabric monitor capacity Usage: config fabric monitor capacity [OPTIONS] COMMAND [ARGS]... FABRIC MONITOR CAPACITY configuration tasks Options: -?, -h, --help Show this message and exit. Commands: threshold FABRIC CAPACITY MONITOR THRESHOLD configuration tasks ~# config fabric monitor capacity threshold 90 ~# To show the capacity : ~# show fabric Usage: show fabric [OPTIONS] COMMAND [ARGS]... Show fabric information Options: -h, -?, --help Show this message and exit. Commands: counters Show fabric port counters isolation Show fabric isolation status monitor Show fabric monitor reachability Show fabric reachability ~# show fabric monitor Usage: show fabric monitor [OPTIONS] COMMAND [ARGS]... Show fabric monitor Options: -?, -h, --help Show this message and exit. 
Commands: capacity Show fabric capacity ~# show fabric monitor capacity Monitored fabric capacity threshold: 90% ASIC Operating Total # % Last Event Last Time Links of Links ------ ----------- ---------- ------- ------------ ----------- asic0 32 144 22.2222 Lower 0:19:17 ago asic1 28 144 19.4444 Lower 0:19:17 ago asic2 32 144 22.2222 Lower 0:19:17 ago asic3 28 144 19.4444 Lower 0:19:17 ago asic4 32 144 22.2222 Lower 0:19:17 ago asic5 28 144 19.4444 Lower 0:19:17 ago asic6 32 144 22.2222 Lower 0:19:19 ago asic7 28 144 19.4444 Lower 0:19:16 ago asic8 32 144 22.2222 Lower 0:19:16 ago asic9 28 144 19.4444 Lower 0:19:20 ago asic10 31 144 21.5278 Lower 0:19:16 ago asic11 28 144 19.4444 Lower 0:19:17 ago ~# ~# config fabric monitor capacity threshold 10 ~# show fabric monitor capacity Monitored fabric capacity threshold: 10% ASIC Operating Total # % Last Event Last Time Links of Links ------ ----------- ---------- ------- ------------ ----------- asic0 32 144 22.2222 Higher 0:00:03 ago asic1 28 144 19.4444 Higher 0:00:03 ago asic2 32 144 22.2222 Higher 0:00:03 ago asic3 28 144 19.4444 Higher 0:00:03 ago asic4 32 144 22.2222 Higher 0:00:03 ago asic5 28 144 19.4444 Higher 0:00:03 ago asic6 32 144 22.2222 Higher 0:00:05 ago asic7 28 144 19.4444 Higher 0:00:02 ago asic8 32 144 22.2222 Higher 0:00:02 ago asic9 28 144 19.4444 Higher 0:00:06 ago asic10 31 144 21.5278 Higher 0:00:02 ago asic11 --- config/fabric.py | 42 ++++++++++ scripts/fabricstat | 110 +++++++++++++++++++++++--- show/fabric.py | 17 ++++ tests/config_fabric_test.py | 24 ++++++ tests/fabricstat_test.py | 34 +++++++- tests/mock_tables/asic0/state_db.json | 10 +++ tests/mock_tables/asic1/state_db.json | 10 +++ tests/mock_tables/config_db.json | 1 + tests/mock_tables/state_db.json | 10 +++ 9 files changed, 244 insertions(+), 14 deletions(-) diff --git a/config/fabric.py b/config/fabric.py index 5c1931418f9..84607d9ebe7 100644 --- a/config/fabric.py +++ b/config/fabric.py @@ -315,3 +315,45 @@ def recovery(pollcount, 
namespace): {"monPollThreshRecovery": pollcount}) +# +# 'config fabric monitor ...' +# +@fabric.group(cls=clicommon.AbbreviationGroup, name='monitor') +def capacity_monitor(): + """FABRIC MONITOR configuration tasks""" + pass + +# +# 'config fabric monitor capacity...' +# +@capacity_monitor.group(cls=clicommon.AbbreviationGroup) +def capacity(): + """FABRIC MONITOR CAPACITY configuration tasks""" + pass + +# +# 'config fabric monitor capacity threshold ' +# +@capacity.command() +@click.argument('capacitythreshold', metavar='', required=True, type=int) +def threshold(capacitythreshold): + """FABRIC CAPACITY MONITOR THRESHOLD configuration tasks""" + ctx = click.get_current_context() + + if capacitythreshold < 5 or capacitythreshold > 250: + ctx.fail("threshold must be in range 5...250") + + namespaces = multi_asic.get_namespace_list() + for idx, namespace in enumerate(namespaces, start=1): + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Make sure configuration data exists + monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA") + if not bool(monitorData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", + {"monCapacityThreshWarn": capacitythreshold}) diff --git a/scripts/fabricstat b/scripts/fabricstat index 29b8ffdbe04..cf3d14bf5e9 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -1,19 +1,21 @@ #!/usr/bin/env python3 import argparse -from collections import OrderedDict, namedtuple +import click import json import os import sys +import utilities_common.multi_asic as multi_asic_util -from utilities_common import constants -from utilities_common.cli import json_serial, UserCache -from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate +from collections import OrderedDict, namedtuple +from datetime 
import datetime, timezone, timedelta from natsort import natsorted -from tabulate import tabulate from sonic_py_common import multi_asic from swsscommon.swsscommon import APP_FABRIC_PORT_TABLE_NAME, COUNTERS_TABLE, COUNTERS_FABRIC_PORT_NAME_MAP, COUNTERS_FABRIC_QUEUE_NAME_MAP -import utilities_common.multi_asic as multi_asic_util +from tabulate import tabulate +from utilities_common import constants +from utilities_common.cli import json_serial, UserCache +from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate # mock the redis for unit test purposes # try: @@ -280,6 +282,62 @@ class FabricQueueStat(FabricStat): print(tabulate(table, queuestat_header, tablefmt='simple', stralign='right')) print() +class FabricCapacity(FabricStat): + def __init__(self, namespace, table_cnt, threshold): + self.db = None + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace) + self.table_cnt = table_cnt + self.threshold = threshold + + def capacity_print(self): + # Connect to database + self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace) + # Get fabric capacity data from STATE_DB table FABRIC_CAPACITY_TABLE + # and store them in fabric_capacity_data + fabric_capacity_data = self.db.get_all(self.db.STATE_DB, "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA") + operational_fap_capacity = 0 + operational_fabric_capacity = 0 + operational_fabric_links = 0; + total_fabric_links = 0; + ratio = 0 + last_event = "None" + last_time = "Never" + + # Get data from fabric_capacity_data + if "fabric_capacity" in fabric_capacity_data: + operational_fabric_capacity = int(fabric_capacity_data['fabric_capacity']) + operational_fabric_capacity = operational_fabric_capacity/1000.0 + if "number_of_links" in fabric_capacity_data: + total_fabric_links = int(fabric_capacity_data['number_of_links']) + if "operating_links" in fabric_capacity_data: + operational_fabric_links = 
int(fabric_capacity_data['operating_links']) + if "warning_threshold" in fabric_capacity_data: + th = fabric_capacity_data['warning_threshold'] + th = th + "%" + self.threshold.append(th) + if "last_event" in fabric_capacity_data: + last_event = fabric_capacity_data['last_event'] + if "last_event_time" in fabric_capacity_data: + last_time = fabric_capacity_data['last_event_time'] + + # Calculate the ratio of number of operational links and all links + if total_fabric_links > 0: + ratio = operational_fabric_links/total_fabric_links*100 + + if last_time != "Never": + dt = datetime.fromtimestamp(int(last_time), timezone.utc) + td = datetime.now(timezone.utc) - dt + td_without_ms = timedelta(seconds=td.seconds) + last_time = str(td_without_ms) +" ago" + + asic_name = "asic0" + if self.namespace: + asic_name = self.namespace + + # Update the table to print + self.table_cnt.append((asic_name, operational_fabric_links, total_fabric_links, ratio, last_event, last_time)) + class FabricReachability(FabricStat): def reachability_print(self): # Connect to database @@ -355,6 +413,8 @@ Examples: fabricstat -p -n asic0 -e fabricstat -q fabricstat -q -n asic0 + fabricstat -c + fabricstat -c -n asic0 fabricstat -C fabricstat -D """) @@ -363,6 +423,7 @@ Examples: parser.add_argument('-r','--reachability', action='store_true', help='Display reachability, otherwise port stat') parser.add_argument('-n','--namespace', default=None, help='Display fabric ports counters for specific namespace') parser.add_argument('-e', '--errors', action='store_true', help='Display errors') + parser.add_argument('-c','--capacity',action='store_true', help='Display fabric capacity') parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status') parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters') parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats') @@ -370,6 +431,7 @@ Examples: args = 
parser.parse_args() queue = args.queue reachability = args.reachability + capacity_status = args.capacity isolation_status = args.isolation namespace = args.namespace errors_only = args.errors @@ -410,14 +472,36 @@ Examples: else: stat.cnstat_print(cnstat_dict, errors_only) - if namespace is None: - # All asics or all fabric asics - multi_asic = multi_asic_util.MultiAsic() - for ns in multi_asic.get_ns_list_based_on_options(): - nsStat(ns, errors_only) + if capacity_status: + # show fabric capacity command + capacity_header = [] + table_cnt = [] + threshold = [] + capacity_header = ["ASIC", "Operating\nLinks", "Total #\nof Links", "%", "Last Event", "Last Time"] + if namespace is None: + # All asics or all fabric asics + multi_asic = multi_asic_util.MultiAsic() + for ns in multi_asic.get_ns_list_based_on_options(): + stat = FabricCapacity(ns, table_cnt, threshold) + stat.capacity_print() + else: + # Asic with namespace + stat = FabricCapacity(namespace, table_cnt, threshold) + stat.capacity_print() + + click.echo("Monitored fabric capacity threshold: {}".format(threshold[0])) + click.echo() + click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right')) else: - # Asic with namespace - nsStat(namespace, errors_only) + # other show fabric commands + if namespace is None: + # All asics or all fabric asics + multi_asic = multi_asic_util.MultiAsic() + for ns in multi_asic.get_ns_list_based_on_options(): + nsStat(ns, errors_only) + else: + # Asic with namespace + nsStat(namespace, errors_only) if __name__ == "__main__": main() diff --git a/show/fabric.py b/show/fabric.py index c67a28ac155..785e1ab4779 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -13,6 +13,23 @@ def counters(): """Show fabric port counters""" pass +@fabric.group(cls=clicommon.AliasedGroup) +def monitor(): + """Show fabric monitor""" + pass + +@monitor.group(invoke_without_command=True) +@multi_asic_util.multi_asic_click_option_namespace +@click.option('-e', '--errors', 
is_flag=True) +def capacity(namespace, errors): + """Show fabric capacity""" + cmd = ['fabricstat', '-c'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if errors: + cmd += ['-e'] + clicommon.run_command(cmd) + @fabric.group(invoke_without_command=True) @multi_asic_util.multi_asic_click_option_namespace @click.option('-e', '--errors', is_flag=True) diff --git a/tests/config_fabric_test.py b/tests/config_fabric_test.py index a535781facc..230615cbaa8 100644 --- a/tests/config_fabric_test.py +++ b/tests/config_fabric_test.py @@ -118,6 +118,18 @@ def test_config_fabric_monitor_state(self, ctx): expect_result = 0 assert operator.eq(result.exit_code, expect_result) + def test_config_capacity(self, ctx): + # Issue command "config fabric monitor capacity threshold 90", + # check if the result is expected. + result = self.basic_check("monitor", ["capacity", "threshold", "90"], ctx) + expect_result=0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric monitor capacity threshold 3", + # check if the result has the warning message. + result = self.basic_check("monitor", ["capacity", "threshold", "3"], ctx) + assert "threshold must be in range 5...250" in result.output + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -152,6 +164,18 @@ def test_multi_config_fabric_monitor_state(self, ctx): expect_result = 0 assert operator.eq(result.exit_code, expect_result) + def test_config_capacity_multi(self, ctx): + # Issue command "config fabric monitor capacity threshold 80", + # check if the result is expected. + result = self.basic_check("monitor", ["capacity", "threshold", "80"], ctx) + expect_result=0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric monitor capacity threshold 4", + # check if the result has the warning message. 
+ result = self.basic_check("monitor", ["capacity", "threshold", "4"], ctx) + assert "threshold must be in range 5...250" in result.output + @classmethod def teardown_class(cls): print("TEARDOWN_TEST") diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index cc06701d817..cc4c049806e 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -151,6 +151,25 @@ 7 0 93 up """ +multi_asic_fabric_capacity = """\ +Monitored fabric capacity threshold: 100% + + ASIC Operating Total # % Last Event Last Time + Links of Links +------ ----------- ---------- ---- ------------ ----------- + asic0 5 8 62.5 None Never + asic1 2 8 25 None Never +""" + +multi_asic_fabric_capacity_asic0 = """\ +Monitored fabric capacity threshold: 100% + + ASIC Operating Total # % Last Event Last Time + Links of Links +------ ----------- ---------- ---- ------------ ----------- + asic0 5 8 62.5 None Never +""" + multi_asic_fabric_isolation = """\ asic0 @@ -181,7 +200,6 @@ 7 0 0 0 """ - class TestFabricStat(object): @classmethod def setup_class(cls): @@ -302,6 +320,20 @@ def test_multi_show_fabric_reachability_asic(self): assert return_code == 0 assert result == multi_asic_fabric_reachability_asic0 + def test_mutli_show_fabric_capacity(self): + return_code, result = get_result_and_return_code(['fabricstat', '-c']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_capacity + + def test_multi_show_fabric_capacity_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-c', '-n', 'asic0']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_capacity_asic0 + def test_multi_show_fabric_isolation(self): return_code, result = get_result_and_return_code(['fabricstat', '-i']) print("return_code: {}".format(return_code)) diff --git a/tests/mock_tables/asic0/state_db.json 
b/tests/mock_tables/asic0/state_db.json index 6ae0258be05..4f3f13c0ae0 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -304,5 +304,15 @@ }, "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { "status": "Active" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "221580", + "missing_capacity": "132948", + "operating_links": "5", + "number_of_links": "8", + "warning_threshold": "100" + }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json index dd76cb7f500..136a3329905 100644 --- a/tests/mock_tables/asic1/state_db.json +++ b/tests/mock_tables/asic1/state_db.json @@ -270,5 +270,15 @@ }, "FABRIC_PORT_TABLE|PORT7" : { "STATUS": "down" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "88632", + "missing_capacity": "265896", + "operating_links": "2", + "number_of_links": "8", + "warning_threshold": "100" + }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index edff84e5547..b2bf54c995a 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -2703,6 +2703,7 @@ "direction": "RX" }, "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monCapacityThreshWarn": "100", "monErrThreshCrcCells": "1", "monErrThreshRxCells": "61035156", "monPollThreshIsolation": "1", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index d6eb88c5139..49ffaeedd89 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1671,5 +1671,15 @@ "minimum_current": "339", "timestamp": "20230704 17:38:04", "warning_status": "False" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "88632", + "missing_capacity": "265896", + "operating_links": "2", + "number_of_links": "8", + "warning_threshold": "100" 
+ }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } From 4c040131db7e01e794f7872071edb44acef56707 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:19:31 +0800 Subject: [PATCH 42/45] Migrate AAA table in db_migrator (#3284) Migrate AAA table in db_migrator #### Why I did it per-command AAA need enable in warm-upgrade case #### How I did it Add db_migrator code to migrate AAA table #### How to verify it Pass all test case. Add new test case. #### Which release branch to backport (provide reason below if selected) N/A #### Description for the changelog Migrate AAA table in db_migrator #### A picture of a cute animal (not mandatory but encouraged) --- scripts/db_migrator.py | 36 ++++++++++++++ .../config_db/per_command_aaa_disable.json | 6 +++ .../per_command_aaa_disable_expected.json | 6 +++ .../per_command_aaa_disable_golden.json | 8 +++ .../config_db/per_command_aaa_enable.json | 9 ++++ .../per_command_aaa_enable_expected.json | 12 +++++ .../per_command_aaa_enable_golden.json | 19 +++++++ .../per_command_aaa_no_authentication.json | 9 ++++ ...ommand_aaa_no_authentication_expected.json | 12 +++++ ..._command_aaa_no_authentication_golden.json | 19 +++++++ .../config_db/per_command_aaa_no_change.json | 15 ++++++ .../per_command_aaa_no_change_expected.json | 15 ++++++ .../per_command_aaa_no_change_golden.json | 19 +++++++ .../config_db/per_command_aaa_no_passkey.json | 8 +++ .../per_command_aaa_no_passkey_expected.json | 11 +++++ .../per_command_aaa_no_passkey_golden.json | 15 ++++++ .../config_db/per_command_aaa_no_tacplus.json | 5 ++ .../per_command_aaa_no_tacplus_expected.json | 12 +++++ .../per_command_aaa_no_tacplus_golden.json | 19 +++++++ tests/db_migrator_test.py | 49 +++++++++++++++++++ 20 files changed, 304 insertions(+) create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_disable.json create mode 100644 
tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_enable.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_change.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json create mode 100644 tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 79bf0490248..529069cdc29 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -807,6 +807,39 @@ def migrate_sflow_table(self): sflow_key = "SFLOW_SESSION_TABLE:{}".format(key) self.appDB.set(self.appDB.APPL_DB, sflow_key, 'sample_direction','rx') + def migrate_tacplus(self): + if not self.config_src_data or 'TACPLUS' not in self.config_src_data: + return + + tacplus_new = 
self.config_src_data['TACPLUS'] + log.log_notice('Migrate TACPLUS configuration') + + global_old = self.configDB.get_entry('TACPLUS', 'global') + if not global_old: + global_new = tacplus_new.get("global") + self.configDB.set_entry("TACPLUS", "global", global_new) + log.log_info('Migrate TACPLUS global: {}'.format(global_new)) + + def migrate_aaa(self): + if not self.config_src_data or 'AAA' not in self.config_src_data: + return + + aaa_new = self.config_src_data['AAA'] + log.log_notice('Migrate AAA configuration') + + authentication = self.configDB.get_entry('AAA', 'authentication') + if not authentication: + authentication_new = aaa_new.get("authentication") + self.configDB.set_entry("AAA", "authentication", authentication_new) + log.log_info('Migrate AAA authentication: {}'.format(authentication_new)) + + # setup per-command accounting + accounting = self.configDB.get_entry('AAA', 'accounting') + if not accounting: + accounting_new = aaa_new.get("accounting") + self.configDB.set_entry("AAA", "accounting", accounting_new) + log.log_info('Migrate AAA accounting: {}'.format(accounting_new)) + def version_unknown(self): """ version_unknown tracks all SONiC versions that doesn't have a version @@ -1234,6 +1267,9 @@ def common_migration_ops(self): # update FRR config mode based on minigraph parser on target image self.migrate_routing_config_mode() + self.migrate_tacplus() + self.migrate_aaa() + def migrate(self): version = self.get_version() log.log_info('Upgrading from version ' + version) diff --git a/tests/db_migrator_input/config_db/per_command_aaa_disable.json b/tests/db_migrator_input/config_db/per_command_aaa_disable.json new file mode 100644 index 00000000000..215e3d7fe32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable.json @@ -0,0 +1,6 @@ +{ + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git 
a/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json new file mode 100644 index 00000000000..215e3d7fe32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json @@ -0,0 +1,6 @@ +{ + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json new file mode 100644 index 00000000000..abc38879b6d --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json @@ -0,0 +1,8 @@ +{ + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable.json b/tests/db_migrator_input/config_db/per_command_aaa_enable.json new file mode 100644 index 00000000000..0026e038504 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable.json @@ -0,0 +1,9 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json new file mode 100644 index 00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json new 
file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json new file mode 100644 index 00000000000..694d2f5cb32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json @@ -0,0 +1,9 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json new file mode 100644 index 00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + 
"TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_change.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change.json new file mode 100644 index 00000000000..518e1af6dbf --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change.json @@ -0,0 +1,15 @@ +{ + "AAA|accounting": { + "login": "local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "AAA|authorization": { + "login": "local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json new file mode 100644 index 00000000000..518e1af6dbf --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json @@ -0,0 +1,15 @@ +{ + "AAA|accounting": { + "login": "local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "AAA|authorization": { + "login": "local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json 
b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json new file mode 100644 index 00000000000..6ec39507a19 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json @@ -0,0 +1,8 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json new file mode 100644 index 00000000000..690620e52f3 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json @@ -0,0 +1,11 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json new file mode 100644 index 00000000000..b06af48439b --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json @@ -0,0 +1,15 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json new file mode 100644 index 00000000000..c45e0745ed5 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json @@ -0,0 +1,5 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json new file mode 100644 index 
00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 97914b4aad4..0aeac7679ba 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -4,6 +4,7 @@ import argparse from unittest import mock from deepdiff import DeepDiff +import json from swsscommon.swsscommon import SonicV2Connector, SonicDBConfig from sonic_py_common import device_info @@ -960,3 +961,51 @@ def test_dns_nameserver_migrator_configdb(self): diff = DeepDiff(resulting_table, expected_table, ignore_order=True) assert not diff + +class TestAAAMigrator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def load_golden_config(self, dbmgtr, test_json): + dbmgtr.config_src_data = {} + + json_path = os.path.join(mock_db_path, 'config_db', test_json + ".json") + if os.path.exists(json_path): + with open(json_path) as f: + dbmgtr.config_src_data = 
json.load(f) + print("test_per_command_aaa load golden config success, config_src_data: {}".format(dbmgtr.config_src_data)) + else: + print("test_per_command_aaa load golden config failed, file {} does not exist.".format(test_json)) + + + @pytest.mark.parametrize('test_json', ['per_command_aaa_enable', + 'per_command_aaa_no_passkey', + 'per_command_aaa_disable', + 'per_command_aaa_no_change', + 'per_command_aaa_no_tacplus', + 'per_command_aaa_no_authentication']) + def test_per_command_aaa(self, test_json): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', test_json) + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + self.load_golden_config(dbmgtr, test_json + '_golden') + dbmgtr.migrate_tacplus() + dbmgtr.migrate_aaa() + resulting_table = dbmgtr.configDB.get_table("AAA") + + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', test_json + '_expected') + expected_db = Db() + expected_table = expected_db.cfgdb.get_table("AAA") + + print("test_per_command_aaa: {}".format(test_json)) + print("test_per_command_aaa, resulting_table: {}".format(resulting_table)) + print("test_per_command_aaa, expected_table: {}".format(expected_table)) + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff From 4a8ffc89c9e0bfa6a0022c6e5b8a8c1c66779b3f Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Tue, 30 Apr 2024 00:37:05 +0800 Subject: [PATCH 43/45] Update sonic-utilities to support new SKU Mellanox-SN5600-O128 (#3236) ### What I did Update sonic-utilities to support new SKU Mellanox-SN5600-O128 1. Add the SKU to the generic configuration updater 2. 
Simplify the logic of the buffer migrator to support the new SKU ### How to verify it Manual and unit tests --- .../gcu_field_operation_validators.conf.json | 2 +- scripts/mellanox_buffer_migrator.py | 9 ++++----- ...empty-config-with-device-info-nvidia-expected.json | 11 +++++++++++ .../empty-config-with-device-info-nvidia-input.json | 6 ++++++ tests/db_migrator_test.py | 1 + 5 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json create mode 100644 tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 76020af984e..68e49b6c03d 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], - "spc4": [ "ACS-SN5400", "ACS-SN5600" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], diff --git a/scripts/mellanox_buffer_migrator.py b/scripts/mellanox_buffer_migrator.py index a01e8665f5b..4ffc900ddd9 100755 --- a/scripts/mellanox_buffer_migrator.py +++ 
b/scripts/mellanox_buffer_migrator.py @@ -108,10 +108,9 @@ def __init__(self, configDB, appDB, stateDB): self.spc2_platforms = ["x86_64-mlnx_msn3700-r0", "x86_64-mlnx_msn3700c-r0"] self.spc3_platforms = ["x86_64-mlnx_msn4600-r0", "x86_64-mlnx_msn4600c-r0", "x86_64-mlnx_msn4700-r0"] - msftskus = ["Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D48C8", "Mellanox-SN2700-D40C8S8", - "Mellanox-SN3800-C64", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D112C8", "Mellanox-SN3800-D28C50"] + dynamic_model_skus = ["Mellanox-SN5600-O128"] - self.is_msft_sku = self.sku in msftskus + self.is_default_traditional_model = self.sku and self.sku.startswith("Mellanox-") and not self.sku in dynamic_model_skus self.pending_update_items = list() self.default_speed_list = ['1000', '10000', '25000', '40000', '50000', '100000', '200000', '400000'] @@ -822,7 +821,7 @@ def mlnx_flush_new_buffer_configuration(self): if not self.ready: return True - if not self.is_buffer_config_default and not self.is_buffer_config_empty or self.is_msft_sku: + if not self.is_buffer_config_default and not self.is_buffer_config_empty or self.is_default_traditional_model: log.log_notice("No item pending to be updated") metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost') metadata['buffer_model'] = 'traditional' @@ -840,7 +839,7 @@ def mlnx_flush_new_buffer_configuration(self): return True def mlnx_is_buffer_model_dynamic(self): - return self.is_buffer_config_default and not self.is_msft_sku + return self.is_buffer_config_default and not self.is_default_traditional_model def mlnx_reorganize_buffer_tables(self, buffer_table, name): """ diff --git a/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json new file mode 100644 index 00000000000..1502ad0b294 --- /dev/null +++ b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json @@ -0,0 +1,11 @@ +{ 
+ "VERSIONS|DATABASE": { + "VERSION": "version_3_0_3" + }, + "DEVICE_METADATA|localhost": { + "synchronous_mode": "enable", + "docker_routing_config_mode": "separated", + "platform": "x86_64-nvidia_sn5600-r0", + "hwsku": "Mellanox-SN5600-O128" + } +} diff --git a/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json new file mode 100644 index 00000000000..8f2b67da080 --- /dev/null +++ b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json @@ -0,0 +1,6 @@ +{ + "DEVICE_METADATA|localhost": { + "platform": "x86_64-nvidia_sn5600-r0", + "hwsku": "Mellanox-SN5600-O128" + } +} diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 0aeac7679ba..e21539766a1 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -157,6 +157,7 @@ def check_appl_db(self, result, expected): ['empty-config', 'empty-config-with-device-info-generic', 'empty-config-with-device-info-traditional', + 'empty-config-with-device-info-nvidia', 'non-default-config', 'non-default-xoff', 'non-default-lossless-profile-in-pg', From 45a19011c1fd4c5afa091b1befca43fe61664f4d Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 29 Apr 2024 12:53:46 -0400 Subject: [PATCH 44/45] Add Precommit and flake8 (#3287) ### What I did add precommit and flake8 to prevent syntax warning in python, e.g. https://github.com/sonic-net/sonic-buildimage/issues/18401 #### How I did it - initially, this check will be optional to verify its functionality, allowing developers to proceed even if it fails. - if it performs as expected, this check will become mandatory in the future. 
- use flake8 v4.0.1 in order to use diff only for PRs (flake8 with diff only is deprecated in v6.0.0 https://github.com/PyCQA/flake8/issues/1749 #### How to verify it https://dev.azure.com/mssonic/build/_build/results?buildId=530004&view=logs&jobId=0cb31af1-9c12-5e3c-148e-856fdcff3a02 --- .azure-pipelines/pre-commit-check.yml | 35 +++++++++++++++++++++++++++ .pre-commit-config.yaml | 9 +++++++ azure-pipelines.yml | 10 ++++++++ 3 files changed, 54 insertions(+) create mode 100644 .azure-pipelines/pre-commit-check.yml create mode 100644 .pre-commit-config.yaml diff --git a/.azure-pipelines/pre-commit-check.yml b/.azure-pipelines/pre-commit-check.yml new file mode 100644 index 00000000000..5b25bef76fc --- /dev/null +++ b/.azure-pipelines/pre-commit-check.yml @@ -0,0 +1,35 @@ +steps: +- checkout: self + clean: true + displayName: 'checkout sonic-utilities repo' + +- script: | + set -x + sudo pip install pre-commit + pre-commit install-hooks + displayName: 'Prepare pre-commit check' + +- script: | + # Run pre-commit check and capture the output + out=`pre-commit run --color never --from-ref HEAD^ --to-ref HEAD 2>&1` + RC=$? + if [[ $RC -ne 0 ]]; then + echo -e "The [pre-commit](http://pre-commit.com/) check detected issues in the files touched by this pull request.\n\ + The pre-commit check is a mandatory check, please fix detected issues.\n\ + \n\ + To run the pre-commit checks locally, you can follow below steps:\n\ + 1. Ensure that default python is python3.\n\ + 2. Ensure that the 'pre-commit' package is installed:\n\ + sudo pip install pre-commit\n\ + 3. Go to repository root folder\n\ + 4. Install the pre-commit hooks:\n\ + pre-commit install\n\ + 5. Use pre-commit to check staged file:\n\ + pre-commit\n\ + 6. 
Alternatively, you can check committed files using:\n\ + pre-commit run --from-ref --to-ref \n" + fi + echo "Pre-commit check results:" + echo "$out" + exit $RC + displayName: 'Run pre-commit check' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..1f76ad99140 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/PyCQA/flake8 + rev: 4.0.1 + hooks: + - id: flake8 + entry: bash -c 'git diff HEAD^ HEAD -U0 -- "$@" | flake8 --diff "$@"' -- + args: ["--max-line-length=120"] diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9688eb2d5dc..dec731eea4d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,6 +21,16 @@ variables: value: $(Build.SourceBranchName) stages: +- stage: Pretest + jobs: + - job: static_analysis + displayName: "Static Analysis" + timeoutInMinutes: 10 + continueOnError: true + pool: ubuntu-20.04 + steps: + - template: .azure-pipelines/pre-commit-check.yml + - stage: Build jobs: From 099d40c9daa159f2c57a031af43c667bb36ab23c Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Mon, 29 Apr 2024 13:38:57 -0700 Subject: [PATCH 45/45] Run `ip neigh flush` before removing the IP address from the interface (#3281) * Ignore any error returned from `ip neigh flush` In the test_po_update test case, one of the things done there is to remove an IP address from a port channel interface. As part of that, the current handling for that issues a `ip neigh flush dev ...` command, added in sonic-net/sonic-utilities#606, presumably to remove old neighbor entries that would no longer be valid. I would think that the kernel would automatically do this, but maybe it didn't back then; I'm not sure if there's been a behavior change here since then. 
In some cases, this command is returning an error, saying "Failed to send flush request: No such file or directory". I'm not sure why this is; maybe when iproute2 is going through the list of neighbors, some neighbor entry was there, but then by the time it issued the deletion request, that neighbor entry was removed by the kernel since the IP address was removed. Either way, I don't believe a failure here is critical. Therefore, ignore any failures from running this command. Signed-off-by: Saikrishna Arcot * Move the IP neighbor flush to be before the IP address removal This should make sure that the IP neighbor flush should always work. This also requires the tests to be updated, to mock out the flush command call since that interface won't exist. Signed-off-by: Saikrishna Arcot --------- Signed-off-by: Saikrishna Arcot --- config/main.py | 11 +++-- tests/ip_config_test.py | 94 ++++++++++++++++++++++++----------------- tests/vlan_test.py | 36 ++++++++++------ 3 files changed, 83 insertions(+), 58 deletions(-) diff --git a/config/main.py b/config/main.py index 8f3b7245bd5..ea2a5154e31 100644 --- a/config/main.py +++ b/config/main.py @@ -4745,17 +4745,16 @@ def remove(ctx, interface_name, ip_addr): if output != "": if any(interface_name in output_line for output_line in output.splitlines()): ctx.fail("Cannot remove the last IP entry of interface {}. 
A static {} route is still bound to the RIF.".format(interface_name, ip_ver)) - remove_router_interface_ip_address(config_db, interface_name, ip_address) - interface_addresses = get_interface_ipaddresses(config_db, interface_name) - if len(interface_addresses) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False and get_intf_ipv6_link_local_mode(ctx, interface_name, table_name) != "enable": - if table_name != "VLAN_SUB_INTERFACE": - config_db.set_entry(table_name, interface_name, None) - if multi_asic.is_multi_asic(): command = ['sudo', 'ip', 'netns', 'exec', str(ctx.obj['namespace']), 'ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)] else: command = ['ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)] clicommon.run_command(command) + remove_router_interface_ip_address(config_db, interface_name, ip_address) + interface_addresses = get_interface_ipaddresses(config_db, interface_name) + if len(interface_addresses) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False and get_intf_ipv6_link_local_mode(ctx, interface_name, table_name) != "enable": + if table_name != "VLAN_SUB_INTERFACE": + config_db.set_entry(table_name, interface_name, None) # # 'loopback-action' subcommand diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py index b227c76ff32..6003e7401a4 100644 --- a/tests/ip_config_test.py +++ b/tests/ip_config_test.py @@ -99,22 +99,28 @@ def test_add_del_interface_valid_ipv4(self): assert ('Eth36.10', '32.11.10.1/24') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Ethernet64 10.10.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet64", "10.10.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet0.10 10.11.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "10.11.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet0.10', '10.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "10.11.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet0.10', '10.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Eth36.10 32.11.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "32.11.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "32.11.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') def test_add_interface_invalid_ipv4(self): db = Db() @@ -185,20 +191,26 @@ def test_add_del_interface_valid_ipv6(self): assert ('Eth36.10', 
'3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Ethernet72 2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet72', '2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('INTERFACE') - - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet0.10', '1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') - - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Eth36.10', '3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet72', '2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('INTERFACE') + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert 
result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet0.10', '1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Eth36.10', '3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') def test_del_interface_case_sensitive_ipv6(self): db = Db() @@ -209,10 +221,12 @@ def test_del_interface_case_sensitive_ipv6(self): assert ('Ethernet72', 'FC00::1/126') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet72 FC00::1/126 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "FC00::1/126"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet72', 'FC00::1/126') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "FC00::1/126"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet72', 'FC00::1/126') not in db.cfgdb.get_table('INTERFACE') def test_add_interface_invalid_ipv6(self): db = Db() @@ -248,10 +262,12 @@ def test_add_del_interface_ipv6_with_leading_zeros(self): assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet68 2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34 - result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') not in db.cfgdb.get_table('INTERFACE') def test_add_del_interface_shortened_ipv6_with_leading_zeros(self): db = Db() @@ -265,10 +281,12 @@ def test_add_del_interface_shortened_ipv6_with_leading_zeros(self): assert ('Ethernet68', '3000::1/64') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet68 3000::001/64 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "3000::001/64"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "3000::001/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE') def test_intf_vrf_bind_unbind(self): runner = CliRunner() diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 436e309281d..5a84737b2ac 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -368,13 +368,17 @@ def 
test_config_vlan_del_vlan(self, mock_restart_dhcp_relay_service): assert "Error: Vlan1000 can not be removed. First remove IP addresses assigned to this VLAN" in result.output # remove vlan IP`s - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "192.168.0.1/21"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "192.168.0.1/21"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "fc02:1000::1/64"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "fc02:1000::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 # del vlan with IP result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1000"], obj=db) @@ -778,15 +782,19 @@ def test_config_vlan_del_dhcp_relay_restart(self): obj = {"config_db": db.cfgdb} # remove vlan IP`s - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], - ["Vlan1000", "192.168.0.1/21"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Vlan1000", "192.168.0.1/21"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + 
assert mock_run_command.call_count == 1 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], - ["Vlan1000", "fc02:1000::1/64"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Vlan1000", "fc02:1000::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 # remove vlan members vlan_member = db.cfgdb.get_table("VLAN_MEMBER")