diff --git a/.azure-pipelines/pre-commit-check.yml b/.azure-pipelines/pre-commit-check.yml
new file mode 100644
index 00000000000..5b25bef76fc
--- /dev/null
+++ b/.azure-pipelines/pre-commit-check.yml
@@ -0,0 +1,35 @@
+steps:
+- checkout: self
+  clean: true
+  displayName: 'checkout sonic-utilities repo'
+
+- script: |
+    set -x
+    sudo pip install pre-commit
+    pre-commit install-hooks
+  displayName: 'Prepare pre-commit check'
+
+- script: |
+    # Run pre-commit check and capture the output
+    out=`pre-commit run --color never --from-ref HEAD^ --to-ref HEAD 2>&1`
+    RC=$?
+    if [[ $RC -ne 0 ]]; then
+      echo -e "The [pre-commit](http://pre-commit.com/) check detected issues in the files touched by this pull request.\n\
+      The pre-commit check is a mandatory check, please fix the detected issues.\n\
+      \n\
+      To run the pre-commit checks locally, you can follow the steps below:\n\
+      1. Ensure that the default python is python3.\n\
+      2. Ensure that the 'pre-commit' package is installed:\n\
+         sudo pip install pre-commit\n\
+      3. Go to the repository root folder\n\
+      4. Install the pre-commit hooks:\n\
+         pre-commit install\n\
+      5. Use pre-commit to check staged files:\n\
+         pre-commit\n\
+      6. Alternatively, you can check committed files using:\n\
+         pre-commit run --from-ref <from_ref> --to-ref <to_ref>\n"
+    fi
+    echo "Pre-commit check results:"
+    echo "$out"
+    exit $RC
+  displayName: 'Run pre-commit check'
diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml
index 8ebe082f50a..1686f20364e 100644
--- a/.github/workflows/semgrep.yml
+++ b/.github/workflows/semgrep.yml
@@ -18,4 +18,4 @@ jobs:
       - uses: actions/checkout@v3
       - run: semgrep ci
         env:
-          SEMGREP_RULES: p/default
+          SEMGREP_RULES: "p/default r/python.lang.security.audit.dangerous-system-call-audit.dangerous-system-call-audit"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..1f76ad99140
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,9 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/PyCQA/flake8
+  rev: 4.0.1
+  hooks:
+  - id: flake8
+    entry: bash -c 'git diff HEAD^ HEAD -U0 -- "$@" | flake8 --diff "$@"' --
+    args: ["--max-line-length=120"]
diff --git a/acl_loader/main.py b/acl_loader/main.py
index e81e05d9b71..f73f3eb0399 100644
--- a/acl_loader/main.py
+++ b/acl_loader/main.py
@@ -413,7 +413,7 @@ def parse_acl_json(filename):
             raise AclLoaderException("Invalid input file %s" % filename)
         return yang_acl
 
-    def load_rules_from_file(self, filename):
+    def load_rules_from_file(self, filename, skip_action_validation=False):
         """
         Load file with ACL rules configuration in openconfig ACL format. Convert rules to Config DB schema.
@@ -421,9 +421,9 @@ def load_rules_from_file(self, filename):
         :return:
         """
         self.yang_acl = AclLoader.parse_acl_json(filename)
-        self.convert_rules()
+        self.convert_rules(skip_action_validation)
 
-    def convert_action(self, table_name, rule_idx, rule):
+    def convert_action(self, table_name, rule_idx, rule, skip_validation=False):
         rule_props = {}
 
         if rule.actions.config.forwarding_action == "ACCEPT":
@@ -452,13 +452,13 @@ def convert_action(self, table_name, rule_idx, rule):
             raise AclLoaderException("Unknown rule action {} in table {}, rule {}".format(
                 rule.actions.config.forwarding_action, table_name, rule_idx))
 
-        if not self.validate_actions(table_name, rule_props):
+        if not self.validate_actions(table_name, rule_props, skip_validation):
             raise AclLoaderException("Rule action {} is not supported in table {}, rule {}".format(
                 rule.actions.config.forwarding_action, table_name, rule_idx))
 
         return rule_props
 
-    def validate_actions(self, table_name, action_props):
+    def validate_actions(self, table_name, action_props, skip_validation=False):
         if self.is_table_control_plane(table_name):
             return True
 
@@ -481,6 +481,11 @@ def validate_actions(self, table_name, action_props):
         else:
             aclcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.ACL_STAGE_CAPABILITY_TABLE, stage.upper()))
             switchcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE))
+            # In the load_minigraph path, the STATE_DB entries may not have been populated yet, because
+            # orchagent is stopped before acl.json is loaded. So we skip the validation if either table is empty
+            if skip_validation and (not aclcapability or not switchcapability):
+                warning("Skipped action validation as capability table is not present in STATE_DB")
+                return True
         for action_key in dict(action_props):
             action_list_key = self.ACL_ACTIONS_CAPABILITY_FIELD
             if action_list_key not in aclcapability:
@@ -699,7 +704,7 @@ def validate_rule_fields(self, rule_props):
         if ("ICMPV6_TYPE" in rule_props or "ICMPV6_CODE" in rule_props) and protocol != 58:
             raise AclLoaderException("IP_PROTOCOL={} is not ICMPV6, but ICMPV6 fields were provided".format(protocol))
 
-    def convert_rule_to_db_schema(self, table_name, rule):
+    def convert_rule_to_db_schema(self, table_name, rule, skip_action_validation=False):
         """
         Convert rules format from openconfig ACL to Config DB schema
         :param table_name: ACL table name to which rule belong
@@ -729,7 +734,7 @@ def convert_rule_to_db_schema(self, table_name, rule):
         elif self.is_table_l3(table_name):
             rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"])
 
-        deep_update(rule_props, self.convert_action(table_name, rule_idx, rule))
+        deep_update(rule_props, self.convert_action(table_name, rule_idx, rule, skip_action_validation))
         deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule))
         deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule))
         deep_update(rule_props, self.convert_icmp(table_name, rule_idx, rule))
@@ -761,7 +766,7 @@ def deny_rule(self, table_name):
             return {}  # Don't add default deny rule if table is not [L3, L3V6]
         return rule_data
 
-    def convert_rules(self):
+    def convert_rules(self, skip_action_validation=False):
         """
         Convert rules in openconfig ACL format to Config DB schema
         :return:
@@ -780,7 +785,7 @@ def convert_rules(self):
         for acl_entry_name in acl_set.acl_entries.acl_entry:
             acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name]
             try:
-                rule = self.convert_rule_to_db_schema(table_name, acl_entry)
+                rule = self.convert_rule_to_db_schema(table_name, acl_entry, skip_action_validation)
                 deep_update(self.rules_info, rule)
             except AclLoaderException as ex:
                 error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex))
@@ -1149,8 +1154,9 @@ def update(ctx):
 @click.option('--session_name', type=click.STRING, required=False)
 @click.option('--mirror_stage', type=click.Choice(["ingress", "egress"]), default="ingress")
 @click.option('--max_priority', type=click.INT, required=False)
+@click.option('--skip_action_validation', is_flag=True, default=False, help="Skip action validation")
 @click.pass_context
-def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
+def full(ctx, filename, table_name, session_name, mirror_stage, max_priority, skip_action_validation):
     """
     Full update of ACL rules configuration.
     If a table_name is provided, the operation will be restricted in the specified table.
@@ -1168,7 +1174,7 @@ def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
     if max_priority:
         acl_loader.set_max_priority(max_priority)
 
-    acl_loader.load_rules_from_file(filename)
+    acl_loader.load_rules_from_file(filename, skip_action_validation)
     acl_loader.full_update()
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 9688eb2d5dc..dec731eea4d 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -21,6 +21,16 @@ variables:
     value: $(Build.SourceBranchName)
 
 stages:
+- stage: Pretest
+  jobs:
+  - job: static_analysis
+    displayName: "Static Analysis"
+    timeoutInMinutes: 10
+    continueOnError: true
+    pool: ubuntu-20.04
+    steps:
+    - template: .azure-pipelines/pre-commit-check.yml
+
 - stage: Build
 
   jobs:
diff --git a/config/fabric.py b/config/fabric.py
index a3870589ae3..84607d9ebe7 100644
--- a/config/fabric.py
+++ b/config/fabric.py
@@ -2,7 +2,10 @@
 import utilities_common.cli as clicommon
 import utilities_common.multi_asic as multi_asic_util
 from sonic_py_common import multi_asic
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, APP_FABRIC_PORT_TABLE_NAME
+
+FABRIC_PORT_STATUS_TABLE_PREFIX = APP_FABRIC_PORT_TABLE_NAME+"|"
+
 
 #
 # 'config fabric ...'
@@ -66,19 +69,13 @@ def isolate(portid, namespace):
 #
 @port.command()
 @click.argument('portid', metavar='<portid>', required=True)
+@click.option('-f', '--force', is_flag=True, default=False, help='Force unisolate a link even if it is auto isolated.')
 @multi_asic_util.multi_asic_click_option_namespace
-def unisolate(portid, namespace):
+def unisolate(portid, namespace, force):
     """FABRIC PORT unisolate """
     ctx = click.get_current_context()
 
-    if not portid.isdigit():
-        ctx.fail("Invalid portid")
-
-    n_asics = multi_asic.get_num_asics()
-    if n_asics > 1 and namespace is None:
-        ctx.fail('Must specify asic')
-
     # Connect to config database
     config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
     config_db.connect()
@@ -87,6 +84,37 @@ def unisolate(portid, namespace):
     state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace)
     state_db.connect(state_db.STATE_DB, False)
 
+    n_asics = multi_asic.get_num_asics()
+    if n_asics > 1 and namespace is None:
+        ctx.fail( 'Must specify asic' )
+
+    # If "all" is specified then unisolate all ports.
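+    # Only ports with a peer (a REMOTE_PORT entry in STATE_DB) are touched; each one has
+    # its isolateStatus cleared, and with --force its forceUnisolateStatus counter is
+    # incremented so fabric link monitoring re-evaluates the link from scratch.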
+    if portid == "all":
+        port_keys = state_db.keys(state_db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
+        for port_key in port_keys:
+            port_data = state_db.get_all(state_db.STATE_DB, port_key)
+            if "REMOTE_PORT" in port_data:
+                port_number = int( port_key.replace( "FABRIC_PORT_TABLE|PORT", "" ) )
+
+                # Make sure configuration data exists
+                portName = f'Fabric{port_number}'
+                portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName)
+                if not bool( portConfigData ):
+                    ctx.fail( "Fabric monitor configuration data not present" )
+
+                # Update entry
+                config_db.mod_entry( "FABRIC_PORT", portName, {'isolateStatus': False} )
+                if force:
+                    forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
+                    forceShutCnt += 1
+                    config_db.mod_entry( "FABRIC_PORT", portName,
+                                         {'forceUnisolateStatus': forceShutCnt})
+
+        return
+
+    if not portid.isdigit():
+        ctx.fail( "Invalid portid" )
+
     # check if the port is actually in use
     portName = f'PORT{portid}'
     portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName)
@@ -102,6 +130,15 @@ def unisolate(portid, namespace):
     # Update entry
     config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': False})
 
+    if force:
+        forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
+        forceShutCnt += 1
+        config_db.mod_entry( "FABRIC_PORT", portName,
+                             {'forceUnisolateStatus': forceShutCnt})
+
+        click.echo("Force unisolate the link.")
+        click.echo("It will clear all fabric link monitoring status for this link!")
+
 #
 # 'config fabric port monitor ...'
 #
@@ -157,6 +194,39 @@ def error_threshold(crccells, rxcells, namespace):
     config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
                         {'monErrThreshCrcCells': crccells, 'monErrThreshRxCells': rxcells})
 
+def setFabricPortMonitorState(state, namespace, ctx):
+    """ set the fabric port monitor state"""
+    # Connect to config database
+    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+    config_db.connect()
+
+    # Make sure configuration data exists
+    monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA")
+    if not bool(monitorData):
+        ctx.fail("Fabric monitor configuration data not present")
+
+    # Update entry
+    config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
+                        {'monState': state})
+
+#
+# 'config fabric port monitor state <state>'
+#
+@monitor.command()
+@click.argument('state', metavar='<state>', required=True)
+@multi_asic_util.multi_asic_click_option_namespace
+def state(state, namespace):
+    """FABRIC PORT MONITOR STATE configuration tasks"""
+    ctx = click.get_current_context()
+
+    n_asics = multi_asic.get_num_asics()
+    if n_asics > 1 and namespace is None:
+        ns_list = multi_asic.get_namespace_list()
+        for namespace in ns_list:
+            setFabricPortMonitorState(state, namespace, ctx)
+    else:
+        setFabricPortMonitorState(state, namespace, ctx)
+
 #
 # 'config fabric port monitor poll ...'
 #
@@ -245,3 +315,45 @@ def recovery(pollcount, namespace):
                         {"monPollThreshRecovery": pollcount})
 
+#
+# 'config fabric monitor ...'
+#
+@fabric.group(cls=clicommon.AbbreviationGroup, name='monitor')
+def capacity_monitor():
+    """FABRIC MONITOR configuration tasks"""
+    pass
+
+#
+# 'config fabric monitor capacity...'
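+# Commands in this group update the FABRIC_MONITOR|FABRIC_MONITOR_DATA entry in every namespace.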
+#
+@capacity_monitor.group(cls=clicommon.AbbreviationGroup)
+def capacity():
+    """FABRIC MONITOR CAPACITY configuration tasks"""
+    pass
+
+#
+# 'config fabric monitor capacity threshold <capacitythreshold>'
+#
+@capacity.command()
+@click.argument('capacitythreshold', metavar='<capacitythreshold>', required=True, type=int)
+def threshold(capacitythreshold):
+    """FABRIC CAPACITY MONITOR THRESHOLD configuration tasks"""
+    ctx = click.get_current_context()
+
+    if capacitythreshold < 5 or capacitythreshold > 250:
+        ctx.fail("threshold must be in range 5...250")
+
+    namespaces = multi_asic.get_namespace_list()
+    for idx, namespace in enumerate(namespaces, start=1):
+        # Connect to config database
+        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+        config_db.connect()
+
+        # Make sure configuration data exists
+        monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA")
+        if not bool(monitorData):
+            ctx.fail("Fabric monitor configuration data not present")
+
+        # Update entry
+        config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
+                            {"monCapacityThreshWarn": capacitythreshold})
diff --git a/config/main.py b/config/main.py
index 259013ebc4f..820317b0986 100644
--- a/config/main.py
+++ b/config/main.py
@@ -19,7 +19,7 @@
 from jsonpatch import JsonPatchConflict
 from jsonpointer import JsonPointerException
 from collections import OrderedDict
-from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat
+from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat, extract_scope
 from minigraph import parse_device_desc_xml, minigraph_encoder
 from natsort import natsorted
 from portconfig import get_child_ports
@@ -106,6 +106,8 @@
 PORT_TPID = "tpid"
 DEFAULT_TPID = "0x8100"
 
+DOM_CONFIG_SUPPORTED_SUBPORTS = ['0', '1']
+
 asic_type = None
 
 DSCP_RANGE = click.IntRange(min=0, max=63)
@@ -1150,6 +1152,24 @@ def validate_gre_type(ctx, _, value):
         return gre_type_value
     except ValueError:
         raise click.UsageError("{} is not a valid GRE type".format(value))
+
+# Function to apply patch for a single ASIC.
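+# scope_changes is a (scope, changes) tuple produced by grouping the patch operations
+# per namespace; results is a shared dict that records per-scope success/failure so the
+# caller can report every scope before deciding whether the overall patch failed.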
+def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path):
+    scope, changes = scope_changes
+    # Replace localhost with DEFAULT_NAMESPACE, which is the DB definition of the host
+    if scope.lower() == "localhost" or scope == "":
+        scope = multi_asic.DEFAULT_NAMESPACE
+
+    scope_for_log = scope if scope else "localhost"
+    try:
+        # Call apply_patch with the ASIC-specific changes and predefined parameters
+        GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)
+        results[scope_for_log] = {"success": True, "message": "Success"}
+        log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}")
+    except Exception as e:
+        results[scope_for_log] = {"success": False, "message": str(e)}
+        log.log_error(f"'apply-patch' execution failed for {scope_for_log} by {changes} due to {str(e)}")
+
 
 # This is our main entrypoint - the main 'config' command
 @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
@@ -1355,12 +1375,47 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i
             patch_as_json = json.loads(text)
             patch = jsonpatch.JsonPatch(patch_as_json)
 
+            results = {}
             config_format = ConfigFormat[format.upper()]
-            GenericUpdater().apply_patch(patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)
 
+            # Initialize a dictionary to hold changes categorized by scope
+            changes_by_scope = {}
+
+            # Iterate over each change in the JSON Patch
+            for change in patch:
+                scope, modified_path = extract_scope(change["path"])
+
+                # Modify the 'path' in the change to remove the scope
+                change["path"] = modified_path
+                # Check if the scope is already in our dictionary, if not, initialize it
+                if scope not in changes_by_scope:
+                    changes_by_scope[scope] = []
+
+                # Add the modified change to the appropriate list based on scope
+                changes_by_scope[scope].append(change)
+
+            # Empty case to force validate YANG model.
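+            # An empty patch yields no scopes at all; seed every namespace (the host plus
+            # each ASIC) with an empty change list so the full config still gets
+            # YANG-validated on each of them.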
+            if not changes_by_scope:
+                asic_list = [multi_asic.DEFAULT_NAMESPACE]
+                asic_list.extend(multi_asic.get_namespace_list())
+                for asic in asic_list:
+                    changes_by_scope[asic] = []
+
+            # Apply changes for each scope
+            for scope_changes in changes_by_scope.items():
+                apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)
+
+            # Check if any updates failed
+            failures = [scope for scope, result in results.items() if not result['success']]
+
+            if failures:
+                failure_messages = '\n'.join([f"- {failed_scope}: {results[failed_scope]['message']}" for failed_scope in failures])
+                raise Exception(f"Failed to apply patch on the following scopes:\n{failure_messages}")
+
+            log.log_notice(f"Patch applied successfully for {patch}.")
             click.secho("Patch applied successfully.", fg="cyan", underline=True)
         except Exception as ex:
-            click.secho("Failed to apply patch", fg="red", underline=True, err=True)
+            click.secho("Failed to apply patch due to: {}".format(ex), fg="red", underline=True, err=True)
             ctx.fail(ex)
 
 @config.command()
@@ -1707,6 +1762,15 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
     argv_str = ' '.join(['config', *sys.argv[1:]])
     log.log_notice(f"'load_minigraph' executing with command: {argv_str}")
 
+    # Check that the golden config file exists if the override flag is set
+    if override_config:
+        if golden_config_path is None:
+            golden_config_path = DEFAULT_GOLDEN_CONFIG_DB_FILE
+        if not os.path.isfile(golden_config_path):
+            click.secho("Cannot find '{}'!".format(golden_config_path),
+                        fg='magenta')
+            raise click.Abort()
+
     #Stop services before config push
     if not no_service_restart:
         log.log_notice("'load_minigraph' stopping services...")
@@ -1743,7 +1807,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
         update_sonic_environment()
 
     if os.path.isfile('/etc/sonic/acl.json'):
-        clicommon.run_command(['acl-loader', 'update', 'full', '/etc/sonic/acl.json'], display_cmd=True)
+        clicommon.run_command(['acl-loader', 'update', 'full', '/etc/sonic/acl.json', '--skip_action_validation'], display_cmd=True)
 
     # Load port_config.json
     try:
@@ -1778,12 +1842,6 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
 
     # Load golden_config_db.json
     if override_config:
-        if golden_config_path is None:
-            golden_config_path = DEFAULT_GOLDEN_CONFIG_DB_FILE
-        if not os.path.isfile(golden_config_path):
-            click.secho("Cannot find '{}'!".format(golden_config_path),
-                        fg='magenta')
-            raise click.Abort()
         override_config_by(golden_config_path)
 
     # Invoke platform script if available before starting the services
@@ -1951,6 +2009,9 @@ def override_config_table(db, input_config_db, dry_run):
             # Use deepcopy by default to avoid modifying input config
             updated_config = update_config(current_config, ns_config_input)
 
+            # Enable YANG hard dependency check to exit early if not satisfied
+            table_hard_dependency_check(updated_config)
+
             yang_enabled = device_info.is_yang_config_validation_enabled(config_db)
             if yang_enabled:
                 # The ConfigMgmt will load YANG and running
@@ -1966,6 +2027,18 @@ def override_config_table(db, input_config_db, dry_run):
                 validate_config_by_cm(cm, ns_config_input, "config_input")
                 # Validate updated whole config
                 validate_config_by_cm(cm, updated_config, "updated_config")
+            else:
+                cm = None
+                try:
+                    # YANG validate the running config generated from minigraph
+                    cm = ConfigMgmt(configdb=config_db)
+                    cm.validateConfigData()
+                except Exception as ex:
+                    log.log_warning("Failed to validate running config. Alerting: {}".format(ex))
+
+                # YANG validate the minigraph-generated config overridden by golden config
+                if cm:
+                    validate_config_by_cm_alerting(cm, updated_config, "updated_config")
 
     if dry_run:
         print(json.dumps(updated_config, sort_keys=True,
@@ -1984,6 +2057,15 @@ def validate_config_by_cm(cm, config_json, jname):
         sys.exit(1)
 
 
+def validate_config_by_cm_alerting(cm, config_json, jname):
+    tmp_config_json = copy.deepcopy(config_json)
+    try:
+        cm.loadData(tmp_config_json)
+        cm.validateConfigData()
+    except Exception as ex:
+        log.log_warning("Failed to validate {}. Alerting: {}".format(jname, ex))
+
+
 def override_config_db(config_db, config_input):
     # Deserialized golden config to DB recognized format
     sonic_cfggen.FormatConverter.to_deserialized(config_input)
@@ -1997,6 +2079,22 @@ def override_config_db(config_db, config_input):
     click.echo("Overriding completed. No service is restarted.")
 
 
+def table_hard_dependency_check(config_json):
+    aaa_table_hard_dependency_check(config_json)
+
+
+def aaa_table_hard_dependency_check(config_json):
+    AAA_TABLE = config_json.get("AAA", {})
+    TACPLUS_TABLE = config_json.get("TACPLUS", {})
+
+    aaa_authentication_login = AAA_TABLE.get("authentication", {}).get("login", "")
+    tacacs_enable = "tacacs+" in aaa_authentication_login.split(",")
+    tacplus_passkey = TACPLUS_TABLE.get("global", {}).get("passkey", "")
+    if tacacs_enable and len(tacplus_passkey) == 0:
+        click.secho("Authentication with 'tacacs+' is not allowed when the passkey does not exist.", fg="magenta")
+        sys.exit(1)
+
+
 #
 # 'hostname' command
 #
@@ -2033,7 +2131,7 @@ def synchronous_mode(sync_mode):
     if ADHOC_VALIDATION:
         if sync_mode != 'enable' and sync_mode != 'disable':
             raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode)
-    
+
     config_db = ValidatedConfigDBConnector(ConfigDBConnector())
     config_db.connect()
     try:
@@ -2041,7 +2139,7 @@ def synchronous_mode(sync_mode):
     except ValueError as e:
         ctx = click.get_current_context()
         ctx.fail("Error: Invalid argument %s, expect either enable or disable" % sync_mode)
-    
+
     click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n
    Option 1. config save -y \n
              config reload -y \n
@@ -2107,7 +2205,7 @@ def portchannel(db, ctx, namespace):
 @click.pass_context
 def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate):
     """Add port channel"""
-    
+
     fvs = {
         'admin_status': 'up',
         'mtu': '9100',
@@ -2119,7 +2217,7 @@ def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate):
         fvs['min_links'] = str(min_links)
     if fallback != 'false':
         fvs['fallback'] = 'true'
-    
+
     db = ValidatedConfigDBConnector(ctx.obj['db'])
     if ADHOC_VALIDATION:
         if is_portchannel_name_valid(portchannel_name) != True:
@@ -2127,18 +2225,18 @@ def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate):
                     .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
         if is_portchannel_present_in_db(db, portchannel_name):
             ctx.fail("{} already exists!".format(portchannel_name)) # TODO: MISSING CONSTRAINT IN YANG MODEL
-    
+
     try:
         db.set_entry('PORTCHANNEL', portchannel_name, fvs)
     except ValueError:
         ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'".format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
-    
+
 @portchannel.command('del')
 @click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
 @click.pass_context
 def remove_portchannel(ctx, portchannel_name):
     """Remove port channel"""
-    
+
     db = ValidatedConfigDBConnector(ctx.obj['db'])
     if ADHOC_VALIDATION:
         if is_portchannel_name_valid(portchannel_name) != True:
@@ -2156,7 +2254,7 @@ def remove_portchannel(ctx, portchannel_name):
 
     if len([(k, v) for k, v in db.get_table('PORTCHANNEL_MEMBER') if k == portchannel_name]) != 0: # TODO: MISSING CONSTRAINT IN YANG MODEL
         ctx.fail("Error: Portchannel {} contains members. Remove members before deleting Portchannel!".format(portchannel_name))
-    
+
     try:
         db.set_entry('PORTCHANNEL', portchannel_name, None)
     except JsonPatchConflict:
@@ -2174,7 +2272,7 @@ def portchannel_member(ctx):
 def add_portchannel_member(ctx, portchannel_name, port_name):
     """Add member to port channel"""
     db = ValidatedConfigDBConnector(ctx.obj['db'])
-    
+
     if ADHOC_VALIDATION:
         if clicommon.is_port_mirror_dst_port(db, port_name):
             ctx.fail("{} is configured as mirror destination port".format(port_name)) # TODO: MISSING CONSTRAINT IN YANG MODEL
@@ -2191,7 +2289,7 @@ def add_portchannel_member(ctx, portchannel_name, port_name):
         # Dont proceed if the port channel does not exist
         if is_portchannel_present_in_db(db, portchannel_name) is False:
             ctx.fail("{} is not present.".format(portchannel_name))
-    
+
     # Don't allow a port to be member of port channel if it is configured with an IP address
     for key,value in db.get_table('INTERFACE').items():
         if type(key) == tuple:
@@ -2229,7 +2327,7 @@ def add_portchannel_member(ctx, portchannel_name, port_name):
             member_port_speed = member_port_entry.get(PORT_SPEED)
 
         port_speed = port_entry.get(PORT_SPEED) # TODO: MISSING CONSTRAINT IN YANG MODEL
-        if member_port_speed != port_speed: 
+        if member_port_speed != port_speed:
             ctx.fail("Port speed of {} is different than the other members of the portchannel {}"
                      .format(port_name, portchannel_name))
 
@@ -2302,7 +2400,7 @@ def del_portchannel_member(ctx, portchannel_name, port_name):
         # Dont proceed if the the port is not an existing member of the port channel
         if not is_port_member_of_this_portchannel(db, port_name, portchannel_name):
             ctx.fail("{} is not a member of portchannel {}".format(port_name, portchannel_name))
-    
+
     try:
         db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None)
     except JsonPatchConflict:
@@ -2489,7 +2587,7 @@ def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer
     if not namespaces['front_ns']:
         config_db = ValidatedConfigDBConnector(ConfigDBConnector())
         config_db.connect()
-        if ADHOC_VALIDATION: 
+        if ADHOC_VALIDATION:
             if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
                 return
     try:
@@ -2944,7 +3042,15 @@ def _qos_update_ports(ctx, ports, dry_run, json_data):
     for table_name in tables_multi_index:
         entries = config_db.get_keys(table_name)
         for key in entries:
-            port, _ = key
+            # Add support for chassis/multi-dut:
+            #   on a single-dut, key = ('Ethernet136', '6')
+            #   while on a chassis, key = ('str2-chassis-lcx-1', 'Asic0', 'Ethernet84', '5')
+            port = None
+            for element in key:
+                if element.startswith('Eth'):
+                    port = element
+                    break
+            assert port is not None, "Port is not found in config DB"
             if not port in portset_to_handle:
                 continue
             config_db.set_entry(table_name, '|'.join(key), None)
@@ -3452,7 +3557,7 @@ def del_community(db, community):
     if community not in snmp_communities:
         click.echo("SNMP community {} is not configured".format(community))
         sys.exit(1)
-    
+
     config_db = ValidatedConfigDBConnector(db.cfgdb)
     try:
         config_db.set_entry('SNMP_COMMUNITY', community, None)
@@ -4511,6 +4616,10 @@ def ip(ctx):
     """Set IP interface attributes"""
     pass
 
+def validate_vlan_exists(db, text):
+    data = db.get_table('VLAN')
+    keys = list(data.keys())
+    return text in keys
 #
 # 'add' subcommand
 #
@@ -4574,6 +4683,12 @@ def add(ctx, interface_name, ip_addr, gw):
     table_name = get_interface_table_name(interface_name)
     if table_name == "":
         ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
+
+    if table_name == "VLAN_INTERFACE":
+        if not validate_vlan_exists(config_db, interface_name):
+            ctx.fail(f"Error: {interface_name} does not exist. Vlan must be created before adding an IP address")
+            return
+
     interface_entry = config_db.get_entry(table_name, interface_name)
     if len(interface_entry) == 0:
         if table_name == "VLAN_SUB_INTERFACE":
@@ -4630,17 +4745,16 @@ def remove(ctx, interface_name, ip_addr):
         if output != "":
             if any(interface_name in output_line for output_line in output.splitlines()):
                 ctx.fail("Cannot remove the last IP entry of interface {}. A static {} route is still bound to the RIF.".format(interface_name, ip_ver))
-    remove_router_interface_ip_address(config_db, interface_name, ip_address)
-    interface_addresses = get_interface_ipaddresses(config_db, interface_name)
-    if len(interface_addresses) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False and get_intf_ipv6_link_local_mode(ctx, interface_name, table_name) != "enable":
-        if table_name != "VLAN_SUB_INTERFACE":
-            config_db.set_entry(table_name, interface_name, None)
-
     if multi_asic.is_multi_asic():
         command = ['sudo', 'ip', 'netns', 'exec', str(ctx.obj['namespace']), 'ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)]
     else:
         command = ['ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)]
     clicommon.run_command(command)
+    remove_router_interface_ip_address(config_db, interface_name, ip_address)
+    interface_addresses = get_interface_ipaddresses(config_db, interface_name)
+    if len(interface_addresses) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False and get_intf_ipv6_link_local_mode(ctx, interface_name, table_name) != "enable":
+        if table_name != "VLAN_SUB_INTERFACE":
+            config_db.set_entry(table_name, interface_name, None)
 
 #
 # 'loopback-action' subcommand
@@ -4995,7 +5109,7 @@ def cable_length(ctx, interface_name, length):
 
     if not is_dynamic_buffer_enabled(config_db):
         ctx.fail("This command can only be supported on a system with dynamic buffer enabled")
-    
+
     if ADHOC_VALIDATION:
         # Check whether port is legal
         ports = config_db.get_entry("PORT", interface_name)
@@ -5134,6 +5248,43 @@ def reset(ctx, interface_name):
     cmd = ['sudo', 'sfputil', 'reset', str(interface_name)]
     clicommon.run_command(cmd)
 
+#
+# 'dom' subcommand ('config interface transceiver dom ...')
+# This command is supported only for
+# 1. non-breakout ports (subport = 0 or subport field is absent in CONFIG_DB)
+# 2. first subport of breakout ports (subport = 1)
+
+@transceiver.command()
+@click.argument('interface_name', metavar='<interface_name>', required=True)
+@click.argument('desired_config', metavar='(enable|disable)', type=click.Choice(['enable', 'disable']))
+@click.pass_context
+def dom(ctx, interface_name, desired_config):
+    """Enable/disable DOM monitoring for SFP transceiver module"""
+    log.log_info("interface transceiver dom {} {} executing...".format(interface_name, desired_config))
+    # Get the config_db connector
+    config_db = ctx.obj['config_db']
+
+    if clicommon.get_interface_naming_mode() == "alias":
+        interface_name = interface_alias_to_name(config_db, interface_name)
+        if interface_name is None:
+            ctx.fail("'interface_name' is None!")
+
+    if interface_name_is_valid(config_db, interface_name) is False:
+        ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + + port_table_entry = config_db.get_entry("PORT", interface_name) + if not port_table_entry: + ctx.fail("Interface {} does not exist".format(interface_name)) + + # We are handling port configuration only for the below mentioned scenarios + # Port is a non-breakout port (subport = 0 or subport field is absent in CONFIG_DB) + # Port is first subport of breakout ports (subport = 1) + # If the port is not in the above mentioned scenarios, then fail the command + if port_table_entry.get("subport", '0') not in DOM_CONFIG_SUPPORTED_SUBPORTS: + ctx.fail("DOM monitoring config only supported for subports {}".format(DOM_CONFIG_SUPPORTED_SUBPORTS)) + else: + config_db.mod_entry("PORT", interface_name, {"dom_polling": "disabled" if desired_config == "disable" else "enabled"}) + # # 'mpls' subgroup ('config interface mpls ...') # @@ -5303,7 +5454,7 @@ def unbind(ctx, interface_name): config_db.set_entry(table_name, interface_name, subintf_entry) else: config_db.set_entry(table_name, interface_name, None) - + click.echo("Interface {} IP disabled and address(es) removed due to unbinding VRF.".format(interface_name)) # # 'ipv6' subgroup ('config interface ipv6 ...') @@ -6481,7 +6632,7 @@ def add_loopback(ctx, loopback_name): lo_intfs = [k for k, v in config_db.get_table('LOOPBACK_INTERFACE').items() if type(k) != tuple] if loopback_name in lo_intfs: ctx.fail("{} already exists".format(loopback_name)) # TODO: MISSING CONSTRAINT IN YANG VALIDATION - + try: config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"}) except ValueError: @@ -6505,7 +6656,7 @@ def del_loopback(ctx, loopback_name): ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ] for ip in ips: config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None) - + try: config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None) except JsonPatchConflict: @@ -6563,9 +6714,9 @@ def ntp(ctx): def add_ntp_server(ctx, ntp_ip_address): """ Add NTP server IP """ if ADHOC_VALIDATION: - if not clicommon.is_ipaddress(ntp_ip_address): + if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid IP address') - db = ValidatedConfigDBConnector(ctx.obj['db']) + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: click.echo("NTP server {} is already configured".format(ntp_ip_address)) @@ -6576,7 +6727,7 @@ def add_ntp_server(ctx, ntp_ip_address): {'resolve_as': ntp_ip_address, 'association_type': 'server'}) except ValueError as e: - ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") @@ -6592,7 +6743,7 @@ def del_ntp_server(ctx, ntp_ip_address): if ADHOC_VALIDATION: if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid IP address') - db = ValidatedConfigDBConnector(ctx.obj['db']) + db = ValidatedConfigDBConnector(ctx.obj['db']) ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: try: @@ -6920,19 +7071,19 @@ def add(ctx, name, ipaddr, port, vrf): if not is_valid_collector_info(name, ipaddr, port, vrf): return - config_db = ValidatedConfigDBConnector(ctx.obj['db']) + config_db = ValidatedConfigDBConnector(ctx.obj['db']) collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2): click.echo("Only 2 collectors can be configured, please delete one") return - + try: config_db.mod_entry('SFLOW_COLLECTOR', name, {"collector_ip": ipaddr, "collector_port": port, "collector_vrf": vrf}) except ValueError as e: - ctx.fail("Invalid ConfigDB. Error: {}".format(e)) + ctx.fail("Invalid ConfigDB. Error: {}".format(e)) return # @@ -7265,7 +7416,7 @@ def add_subinterface(ctx, subinterface_name, vid): if vid is not None: subintf_dict.update({"vlan" : vid}) subintf_dict.update({"admin_status" : "up"}) - + try: config_db.set_entry('VLAN_SUB_INTERFACE', subinterface_name, subintf_dict) except ValueError as e: diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index 75846d54e3e..19b39b4336d 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -216,7 +216,7 @@ def sdk_sniffer_enable(): env_variable_string=sdk_sniffer_env_variable_string) if not ignore: err = restart_swss() - if err is not 0: + if err != 0: return click.echo('SDK sniffer is Enabled, recording file is %s.' 
% sdk_sniffer_filename)
     else:
@@ -229,7 +229,7 @@ def sdk_sniffer_disable():
     ignore = sniffer_env_variable_set(enable=False, env_variable_name=ENV_VARIABLE_SX_SNIFFER)
     if not ignore:
         err = restart_swss()
-        if err is not 0:
+        if err != 0:
             return
         click.echo("SDK sniffer is Disabled.")
     else:
diff --git a/config/syslog.py b/config/syslog.py
index 7533a7f71f2..a5d520d9cf7 100644
--- a/config/syslog.py
+++ b/config/syslog.py
@@ -5,7 +5,9 @@ import subprocess
 
 import utilities_common.cli as clicommon
+import utilities_common.multi_asic as multi_asic_util
 from sonic_py_common import logger
+from sonic_py_common import multi_asic
 from syslog_util import common as syslog_common
 
 
@@ -457,20 +459,46 @@ def delete(db, server_ip_address):
 def rate_limit_host(db, interval, burst):
     """ Configure syslog rate limit for host """
     syslog_common.rate_limit_validator(interval, burst)
-    syslog_common.save_rate_limit_to_db(db, None, interval, burst, log)
+    syslog_common.save_rate_limit_to_db(db.cfgdb, None, interval, burst, log)
 
 
 @syslog.command("rate-limit-container")
 @click.argument("service_name", required=True)
 @click.option("-i", "--interval", help="Configures syslog rate limit interval in seconds for specified containers", type=click.IntRange(0, 2147483647))
 @click.option("-b", "--burst", help="Configures syslog rate limit burst in number of messages for specified containers", type=click.IntRange(0, 2147483647))
+@click.option('--namespace', '-n', 'namespace', default=None,
+              type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']),
+              show_default=True, help='Namespace name or all')
 @clicommon.pass_db
-def rate_limit_container(db, service_name, interval, burst):
+def rate_limit_container(db, service_name, interval, burst, namespace):
     """ Configure syslog rate limit for containers """
     syslog_common.rate_limit_validator(interval, burst)
-    feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE)
+    features = db.cfgdb.get_table(syslog_common.FEATURE_TABLE)
+    syslog_common.service_validator(features, service_name)
+
+    global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features)
+    if not namespace:
+        # for all namespaces
+        for namespace, cfg_db in db.cfgdb_clients.items():
+            if namespace == multi_asic.DEFAULT_NAMESPACE:
+                feature_data = global_feature_data
+            else:
+                feature_data = per_ns_feature_data
+            if service_name and service_name not in feature_data:
+                continue
+            syslog_common.service_validator(feature_data, service_name)
+            syslog_common.save_rate_limit_to_db(cfg_db, service_name, interval, burst, log)
+        return
+    elif namespace == 'default':
+        # for default/global namespace only
+        namespace = multi_asic.DEFAULT_NAMESPACE
+        feature_data = global_feature_data
+    else:
+        # for a specific namespace
+        feature_data = per_ns_feature_data
 
+    syslog_common.service_validator(feature_data, service_name)
-    syslog_common.save_rate_limit_to_db(db, service_name, interval, burst, log)
+    syslog_common.save_rate_limit_to_db(db.cfgdb_clients[namespace], service_name, interval, burst, log)
 
 
 @syslog.group(
@@ -482,14 +510,70 @@ def rate_limit_feature():
     pass
 
 
+def get_feature_names_to_proceed(db, service_name, namespace):
+    """Get the list of feature names to be processed by the "config syslog rate-limit-feature enable" and
+    "config syslog rate-limit-feature disable" CLIs
+
+    Args:
+        db (object): Db object
+        service_name (str): Nullable service name to be enabled/disabled
+        namespace (str): Namespace provided by user
+
+    Returns:
+        list: A list of feature names
+    """
+    features = 
db.cfgdb.get_table(syslog_common.FEATURE_TABLE) + if service_name: + syslog_common.service_validator(features, service_name) + + global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features) + if not namespace: + if not service_name: + feature_list = [feature_name for feature_name in global_feature_data.keys()] + if multi_asic.is_multi_asic(): + asic_count = multi_asic.get_num_asics() + for i in range(asic_count): + feature_list.extend([f'{feature_name}{i}' for feature_name in per_ns_feature_data.keys()]) + else: + feature_config = features[service_name] + feature_list = [] + if feature_config[syslog_common.FEATURE_HAS_GLOBAL_SCOPE].lower() == 'true': + feature_list.append(service_name) + + if multi_asic.is_multi_asic(): + if feature_config[syslog_common.FEATURE_HAS_PER_ASIC_SCOPE].lower() == 'true': + asic_count = multi_asic.get_num_asics() + for i in range(asic_count): + feature_list.append(multi_asic.get_container_name_from_asic_id(service_name, i)) + elif namespace == 'default': + if not service_name: + feature_list = [feature_name for feature_name in global_feature_data.keys()] + else: + syslog_common.service_validator(global_feature_data, service_name) + feature_list = [service_name] + else: + asic_num = multi_asic.get_asic_id_from_name(namespace) + if not service_name: + feature_list = [multi_asic.get_container_name_from_asic_id(feature_name, asic_num) for feature_name in per_ns_feature_data.keys()] + else: + syslog_common.service_validator(per_ns_feature_data, service_name) + feature_list = [multi_asic.get_container_name_from_asic_id(service_name, asic_num)] + return feature_list + + @rate_limit_feature.command("enable") +@click.argument("service_name", required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def enable_rate_limit_feature(db): +def enable_rate_limit_feature(db, service_name, namespace): """ Enable syslog rate limit feature """ - feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - for feature_name in feature_data.keys(): + feature_list = get_feature_names_to_proceed(db, service_name, namespace) + for feature_name in feature_list: click.echo(f'Enabling syslog rate limit feature for {feature_name}') - output, _ = clicommon.run_command(['docker', 'ps', '-q', '-f', 'status=running', '-f', f'name={feature_name}'], return_cmd=True) + shell_cmd = f'docker ps -f status=running --format "{{{{.Names}}}}" | grep -E "^{feature_name}$"' + output, _ = clicommon.run_command(shell_cmd, return_cmd=True, shell=True) if not output: click.echo(f'{feature_name} is not running, ignoring...') continue @@ -517,16 +601,21 @@ def enable_rate_limit_feature(db): if not failed: click.echo(f'Enabled syslog rate limit feature for {feature_name}') - - + + @rate_limit_feature.command("disable") +@click.argument("service_name", required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def disable_rate_limit_feature(db): +def disable_rate_limit_feature(db, service_name, namespace): """ Disable syslog rate limit feature """ - feature_data = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - for feature_name in feature_data.keys(): + feature_list = get_feature_names_to_proceed(db, service_name, namespace) + for feature_name in 
feature_list:
         click.echo(f'Disabling syslog rate limit feature for {feature_name}')
-        output, _ = clicommon.run_command(['docker', 'ps', '-q', '-f', 'status=running', '-f', f'name={feature_name}'], return_cmd=True)
+        shell_cmd = f'docker ps -f status=running --format "{{{{.Names}}}}" | grep -E "^{feature_name}$"'
+        output, _ = clicommon.run_command(shell_cmd, return_cmd=True, shell=True)
         if not output:
             click.echo(f'{feature_name} is not running, ignoring...')
             continue
@@ -553,4 +642,3 @@ def disable_rate_limit_feature(db, service_name, namespace):
 
     if not failed:
         click.echo(f'Disabled syslog rate limit feature for {feature_name}')
-
diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index fb0c94aead8..e97922af656 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -3844,6 +3844,21 @@ This command sets the number of consecutive polls in which no error is detected
   admin@sonic:~$ config fabric port monitor poll threshold recovery 5 -n asic0
   ```
 
+**config fabric port monitor state <state>**
+
+This command sets the monitor state in CONFIG_DB to enable/disable the fabric monitor feature.
+
+- Usage:
+  ```
+  config fabric port monitor state [OPTIONS] <state>
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ config fabric port monitor state enable
+  admin@sonic:~$ config fabric port monitor state disable
+  ```
+
 ## Feature
 
 SONiC includes a capability in which Feature state can be enabled/disabled
@@ -5281,6 +5296,22 @@ This command is used to reset an SFP transceiver
   Resetting port Ethernet0...  OK
   ```
 
+**config interface transceiver dom**
+
+This command is used to configure the Digital Optical Monitoring (DOM) for an interface.
+
+- Usage:
+  ```
+  config interface transceiver dom <interface_name> (enable | disable)
+  ```
+
+- Examples:
+  ```
+  user@sonic~$ sudo config interface transceiver dom Ethernet0 enable
+
+  user@sonic~$ sudo config interface transceiver dom Ethernet0 disable
+  ```
+
 **config interface mtu <interface_name> (Versions >= 201904)**
 
 This command is used to configure the mtu for the Physical interface. Use the value 1500 for setting max transfer unit size to 1500 bytes.
 
@@ -10118,7 +10149,7 @@ This command displays rate limit configuration for containers.
 
 - Usage
   ```
-  show syslog rate-limit-container [<service_name>]
+  show syslog rate-limit-container [<service_name>] -n [<namespace>]
  ```
 
- Example:
@@ -10142,6 +10173,37 @@ This command displays rate limit configuration for containers.
   SERVICE         INTERVAL    BURST
   --------------  ----------  -------
   bgp             0           0
+
+  # Multi ASIC
+  show syslog rate-limit-container
+  SERVICE    INTERVAL    BURST
+  --------   ----------  --------
+  bgp        500         N/A
+  snmp       300         20000
+  swss       2000        12000
+  Namespace asic0:
+  SERVICE    INTERVAL    BURST
+  --------   ----------  --------
+  bgp        500         N/A
+  snmp       300         20000
+  swss       2000        12000
+
+  # Multi ASIC
+  show syslog rate-limit-container bgp
+  SERVICE    INTERVAL    BURST
+  --------   ----------  --------
+  bgp        500         5000
+  Namespace asic0:
+  SERVICE    INTERVAL    BURST
+  --------   ----------  --------
+  bgp        500         5000
+
+  # Multi ASIC
+  show syslog rate-limit-container bgp -n asic1
+  Namespace asic1:
+  SERVICE    INTERVAL    BURST
+  --------   ----------  --------
+  bgp        500         5000
   ```
 
 ### Syslog Config Commands
@@ -10220,10 +10282,19 @@ This command is used to configure syslog rate limit for containers.
 
 - Parameters:
   - _interval_: determines the amount of time that is being measured for rate limiting.
   - _burst_: defines the amount of messages, that have to occur in the time limit of interval, to trigger rate limiting
+  - _namespace_: namespace name or all. Value "default" indicates global namespace.
 
 - Example:
   ```
+  # Config bgp for all namespaces. For multi ASIC platforms, bgp service in all namespaces will be affected.
+  # For single ASIC platforms, bgp service in global namespace will be affected.
   admin@sonic:~$ sudo config syslog rate-limit-container bgp --interval 300 --burst 20000
+
+  # Config bgp for global namespace only.
+  config syslog rate-limit-container bgp --interval 300 --burst 20000 -n default
+
+  # Config bgp for asic0 namespace only.
+  config syslog rate-limit-container bgp --interval 300 --burst 20000 -n asic0
   ```
 
 **config syslog rate-limit-feature enable**
@@ -10232,12 +10303,28 @@ This command is used to enable syslog rate limit feature.
 
 - Usage:
  ```
-  config syslog rate-limit-feature enable
+  config syslog rate-limit-feature enable [<service_name>] -n [<namespace>]
  ```
 
- Example:
  ```
+  # Enable syslog rate limit for all services in all namespaces
   admin@sonic:~$ sudo config syslog rate-limit-feature enable
+
+  # Enable syslog rate limit for all services in global namespace
+  config syslog rate-limit-feature enable -n default
+
+  # Enable syslog rate limit for all services in asic0 namespace
+  config syslog rate-limit-feature enable -n asic0
+
+  # Enable syslog rate limit for database in all namespaces
+  config syslog rate-limit-feature enable database
+
+  # Enable syslog rate limit for database in default namespace
+  config syslog rate-limit-feature enable database -n default
+
+  # Enable syslog rate limit for database in asic0 namespace
+  config syslog rate-limit-feature enable database -n asic0
  ```
 
**config syslog rate-limit-feature disable**
 
This command is used to disable syslog rate limit feature.
 
- Usage:
  ```
-  config syslog rate-limit-feature disable
+  config syslog rate-limit-feature disable [<service_name>] -n [<namespace>]
  ```
 
- Example:
  ```
+  # Disable syslog rate limit for all services in all namespaces
   admin@sonic:~$ sudo config syslog rate-limit-feature disable
+
+  # Disable syslog rate limit for all services in global namespace
+  config syslog rate-limit-feature disable -n default
+
+  # Disable syslog rate limit for all services in asic0 namespace
+  config syslog rate-limit-feature disable -n asic0
+
+  # Disable syslog rate limit for database in all namespaces
+  config syslog rate-limit-feature disable database
+
+  # Disable syslog rate limit for database in default namespace
+  config syslog rate-limit-feature disable database -n default
+
+  # Disable syslog rate limit for database in asic0 namespace
+  config syslog rate-limit-feature disable database -n asic0
  ```
 
Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog)
diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index d0818172f8f..32a356bf9ae 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -1,12 +1,14 @@
 import copy
 import json
+import subprocess
 import jsondiff
 import importlib
 import os
 import tempfile
 from collections import defaultdict
 from swsscommon.swsscommon import ConfigDBConnector
-from .gu_common import genericUpdaterLogging
+from sonic_py_common import multi_asic
+from .gu_common import GenericConfigUpdaterError, genericUpdaterLogging
 
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
 UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json"
@@ -32,12 +34,11 @@ def log_error(m):
     logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console)
 
 
-def get_config_db():
-    config_db = ConfigDBConnector()
+def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE):
+    config_db = ConfigDBConnector(use_unix_socket_path=True, 
namespace=namespace) config_db.connect() return config_db - def set_config(config_db, tbl, key, data): config_db.set_entry(tbl, key, data) @@ -73,8 +74,9 @@ class ChangeApplier: updater_conf = None - def __init__(self): - self.config_db = get_config_db() + def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace + self.config_db = get_config_db(self.namespace) self.backend_tables = [ "BUFFER_PG", "BUFFER_PROFILE", @@ -160,18 +162,32 @@ def apply(self, change): log_error("Failed to apply Json change") return ret - def remove_backend_tables_from_config(self, data): for key in self.backend_tables: data.pop(key, None) - def _get_running_config(self): - (_, fname) = tempfile.mkstemp(suffix="_changeApplier") - os.system("sonic-cfggen -d --print-data > {}".format(fname)) - run_data = {} - with open(fname, "r") as s: - run_data = json.load(s) - if os.path.isfile(fname): + _, fname = tempfile.mkstemp(suffix="_changeApplier") + + if self.namespace: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + + with open(fname, "w") as file: + result = subprocess.Popen(cmd, stdout=file, stderr=subprocess.PIPE, text=True) + _, err = result.communicate() + + return_code = result.returncode + if return_code: os.remove(fname) + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + + run_data = {} + try: + with open(fname, "r") as file: + run_data = json.load(file) + finally: + if os.path.isfile(fname): + os.remove(fname) return run_data diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 6fa65be21fb..68e49b6c03d 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], - "spc4": [ "ACS-SN5600"] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index f9aab823365..b75939749ce 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,4 +1,5 @@ import json +import jsonpointer import os from enum import Enum from .gu_common import GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ @@ -6,10 +7,37 @@ from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier +from sonic_py_common import multi_asic CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" +def 
extract_scope(path): + if not path: + raise Exception("Wrong patch with empty path.") + + try: + pointer = jsonpointer.JsonPointer(path) + parts = pointer.parts + except Exception as e: + raise Exception(f"Error resolving path: '{path}' due to {e}") + + if not parts: + raise Exception("Wrong patch with empty path.") + if parts[0].startswith("asic"): + if not parts[0][len("asic"):].isnumeric(): + raise Exception(f"Error resolving path: '{path}' due to incorrect ASIC number.") + scope = parts[0] + remainder = "/" + "/".join(parts[1:]) + elif parts[0] == "localhost": + scope = "localhost" + remainder = "/" + "/".join(parts[1:]) + else: + scope = "" + remainder = path + + return scope, remainder + class ConfigLock: def acquire_lock(self): # TODO: Implement ConfigLock @@ -29,77 +57,82 @@ def __init__(self, patchsorter=None, changeapplier=None, config_wrapper=None, - patch_wrapper=None): + patch_wrapper=None, + namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) - self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace) def apply(self, patch, sort=True): - self.logger.log_notice("Patch application starting.") - self.logger.log_notice(f"Patch: {patch}") + scope = self.namespace if self.namespace else 'localhost' + self.logger.log_notice(f"{scope}: Patch application starting.") + self.logger.log_notice(f"{scope}: Patch: {patch}") # Get old config - self.logger.log_notice("Getting current config db.") + self.logger.log_notice(f"{scope} getting current config db.") old_config = self.config_wrapper.get_config_db_as_json() # Generate target config - self.logger.log_notice("Simulating the target full config after applying the patch.") + self.logger.log_notice(f"{scope}: simulating the target full config after applying the patch.") target_config = self.patch_wrapper.simulate_patch(patch, old_config) - + # Validate all JsonPatch operations on specified fields - self.logger.log_notice("Validating all JsonPatch operations are permitted on the specified fields") + self.logger.log_notice(f"{scope}: validating all JsonPatch operations are permitted on the specified fields") self.config_wrapper.validate_field_operation(old_config, target_config) # Validate target config does not have empty tables since they do not show up in ConfigDb - self.logger.log_notice("Validating target config does not have empty tables, " \ + self.logger.log_notice(f"{scope}: alidating target config does not have empty tables, " \ "since they do not show up in ConfigDb.") empty_tables = self.config_wrapper.get_empty_tables(target_config) if empty_tables: # if there are empty tables empty_tables_txt = ", ".join(empty_tables) - raise EmptyTableError("Given patch is not valid because it will result in empty tables " \ + raise EmptyTableError(f"{scope}: given patch is not valid 
                                  "which is not allowed in ConfigDb. " \
                                  f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}")

         # Generate list of changes to apply
         if sort:
-            self.logger.log_notice("Sorting patch updates.")
+            self.logger.log_notice(f"{scope}: sorting patch updates.")
             changes = self.patchsorter.sort(patch)
         else:
-            self.logger.log_notice("Converting patch to JsonChange.")
+            self.logger.log_notice(f"{scope}: converting patch to JsonChange.")
             changes = [JsonChange(jsonpatch.JsonPatch([element])) for element in patch]
-
+
         changes_len = len(changes)
-        self.logger.log_notice(f"The patch was converted into {changes_len} " \
+        self.logger.log_notice(f"The {scope} patch was converted into {changes_len} " \
             f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}")
         for change in changes:
             self.logger.log_notice(f"  * {change}")

         # Apply changes in order
-        self.logger.log_notice(f"Applying {changes_len} change{'s' if changes_len != 1 else ''} " \
+        self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \
             f"in order{':' if changes_len > 0 else '.'}")
         for change in changes:
             self.logger.log_notice(f"  * {change}")
             self.changeapplier.apply(change)

         # Validate config updated successfully
-        self.logger.log_notice("Verifying patch updates are reflected on ConfigDB.")
+        self.logger.log_notice(f"{scope}: verifying patch updates are reflected on ConfigDB.")
         new_config = self.config_wrapper.get_config_db_as_json()
         self.changeapplier.remove_backend_tables_from_config(target_config)
         self.changeapplier.remove_backend_tables_from_config(new_config)
         if not(self.patch_wrapper.verify_same_json(target_config, new_config)):
-            raise GenericConfigUpdaterError(f"After applying patch to config, there are still some parts not updated")
+            raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated")
+
+        self.logger.log_notice(f"{scope}: patch application completed.")

-        self.logger.log_notice("Patch application completed.")

 class ConfigReplacer:
-    def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None):
+    def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE):
+        self.namespace = namespace
         self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True)
-        self.patch_applier = patch_applier if patch_applier is not None else PatchApplier()
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper()
-        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper()
+        self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace)
+        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace)
+        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace)

     def replace(self, target_config):
         self.logger.log_notice("Config replacement starting.")
@@ -122,15 +155,18 @@ def replace(self, target_config):
         self.logger.log_notice("Config replacement completed.")

+
 class FileSystemConfigRollbacker:
     def __init__(self,
                  checkpoints_dir=CHECKPOINTS_DIR,
                  config_replacer=None,
-                 config_wrapper=None):
+                 config_wrapper=None,
+                 namespace=multi_asic.DEFAULT_NAMESPACE):
+        self.namespace = namespace
         self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True)
self.checkpoints_dir = checkpoints_dir - self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer() - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) def rollback(self, checkpoint_name): self.logger.log_notice("Config rollbacking starting.") @@ -168,7 +204,7 @@ def checkpoint(self, checkpoint_name): def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") - + self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") if not self._checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") @@ -236,12 +272,13 @@ def _delete_checkpoint(self, name): path = self._get_checkpoint_full_path(name) return os.remove(path) + class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None): + def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self) - ConfigReplacer.__init__(self) - FileSystemConfigRollbacker.__init__(self) + PatchApplier.__init__(self, namespace=namespace) + ConfigReplacer.__init__(self, namespace=namespace) + FileSystemConfigRollbacker.__init__(self, namespace=namespace) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer @@ -265,10 +302,12 @@ def list_checkpoints(self): def delete_checkpoint(self, checkpoint_name): self.decorated_config_rollbacker.delete_checkpoint(checkpoint_name) + class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer) + def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) + self.namespace = namespace self.patch_wrapper = patch_wrapper self.config_wrapper = config_wrapper @@ -280,13 +319,15 @@ def replace(self, target_config): config_db_target_config = self.config_wrapper.convert_sonic_yang_to_config_db(target_config) Decorator.replace(self, config_db_target_config) + class ConfigLockDecorator(Decorator): def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, - config_lock = ConfigLock()): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker) + config_lock=ConfigLock(), + namespace=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) self.config_lock = config_lock @@ -307,28 +348,35 @@ def execute_write_action(self, action, *args): action(*args) self.config_lock.release_lock() + class GenericUpdateFactory: + def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace + def 
create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: - patch_applier = SonicYangDecorator( - decorated_patch_applier = patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + patch_applier = SonicYangDecorator(decorated_patch_applier=patch_applier, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper, + namespace=self.namespace) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier = patch_applier) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) return patch_applier @@ -337,24 +385,27 @@ def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yan config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper) + config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: - config_replacer = SonicYangDecorator( - decorated_config_replacer = config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper, + namespace=self.namespace) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer = config_replacer) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, namespace=self.namespace) return config_replacer @@ -363,18 +414,19 @@ def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_table config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper) + patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, - 
changeapplier=change_applier) + changeapplier=change_applier, + namespace=self.namespace) - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper = config_wrapper, config_replacer = config_replacer) + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker = config_rollbacker) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) return config_rollbacker @@ -383,15 +435,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper() + return DryRunConfigWrapper(namespace=self.namespace) else: - return ConfigWrapper() + return ConfigWrapper(namespace=self.namespace) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier() + return ChangeApplier(namespace=self.namespace) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not ignore_non_yang_tables and not ignore_paths: @@ -408,10 +460,11 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, return NonStrictPatchSorter(config_wrapper, patch_wrapper, config_splitter) + class GenericUpdater: - def __init__(self, generic_update_factory=None): + def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory() + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index a6cb8de0944..974c540c07a 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -9,7 +9,7 @@ import copy import re import os -from sonic_py_common import logger +from sonic_py_common import logger, multi_asic from enum import Enum YANG_DIR = "/usr/local/yang-models" @@ -52,7 +52,8 @@ def __eq__(self, other): return False class ConfigWrapper: - def __init__(self, yang_dir = YANG_DIR): + def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): + self.namespace = namespace self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None @@ -63,13 +64,16 @@ def get_config_db_as_json(self): return config_db_json def _get_config_db_as_text(self): - # TODO: Getting configs from CLI is very slow, need to get it from sonic-cffgen directly - cmd = "show runningconfiguration all" - result = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + + result = subprocess.Popen(cmd, shell=False, 
text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         text, err = result.communicate()
         return_code = result.returncode
         if return_code: # non-zero means failure
-            raise GenericConfigUpdaterError(f"Failed to get running config, Return code: {return_code}, Error: {err}")
+            raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}")
         return text

     def get_sonic_yang_as_json(self):
@@ -147,12 +151,12 @@ def validate_config_db_config(self, config_db_as_json):

     def validate_field_operation(self, old_config, target_config):
         """
-        Some fields in ConfigDB are restricted and may not allow third-party addition, replacement, or removal.
-        Because YANG only validates state and not transitions, this method helps to JsonPatch operations/transitions for the specified fields.
+        Some fields in ConfigDB are restricted and may not allow third-party addition, replacement, or removal.
+        Because YANG only validates state and not transitions, this method helps validate JsonPatch operations/transitions for the specified fields.
         """
         patch = jsonpatch.JsonPatch.from_diff(old_config, target_config)
-
-        # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation
+
+        # illegal_operations_to_fields_map['remove'] yields a list of fields for which `remove` is an illegal operation
         illegal_operations_to_fields_map = {
             'add':[],
             'replace': [],
@@ -180,7 +184,7 @@ def _invoke_validating_function(cmd, jsonpatch_element):
             with open(GCU_FIELD_OP_CONF_FILE, "r") as s:
                 gcu_field_operation_conf = json.load(s)
         else:
-            raise GenericConfigUpdaterError("GCU field operation validators config file not found")
+            raise GenericConfigUpdaterError("GCU field operation validators config file not found")

         for element in patch:
             path = element["path"]
@@ -296,8 +300,8 @@ def create_sonic_yang_with_loaded_models(self):

 class DryRunConfigWrapper(ConfigWrapper):
     # This class will simulate all read/write operations to ConfigDB on a virtual storage unit.
-    def __init__(self, initial_imitated_config_db = None):
-        super().__init__()
+    def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE):
+        super().__init__(namespace=namespace)
         self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True)
         self.imitated_config_db = copy.deepcopy(initial_imitated_config_db)

@@ -317,8 +321,9 @@ def _init_imitated_config_db_if_none(self):

 class PatchWrapper:
-    def __init__(self, config_wrapper=None):
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper()
+    def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE):
+        self.namespace = namespace
+        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace)
         self.path_addressing = PathAddressing(self.config_wrapper)

     def validate_config_db_patch_has_yang_models(self, patch):
diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index c4d4e2da9c9..529069cdc29 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -8,10 +8,9 @@
 import re

 from sonic_py_common import device_info, logger
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig
 from minigraph import parse_xml
 from utilities_common.helper import update_config
-from utilities_common.general import load_db_config

 INIT_CFG_FILE = '/etc/sonic/init_cfg.json'
 MINIGRAPH_FILE = '/etc/sonic/minigraph.xml'
@@ -808,6 +807,39 @@ def migrate_sflow_table(self):
             sflow_key = "SFLOW_SESSION_TABLE:{}".format(key)
             self.appDB.set(self.appDB.APPL_DB, sflow_key, 'sample_direction','rx')

+    def migrate_tacplus(self):
+        if not self.config_src_data or 'TACPLUS' not in self.config_src_data:
+            return
+
+        tacplus_new = self.config_src_data['TACPLUS']
+        log.log_notice('Migrate TACPLUS configuration')
+
+        global_old = self.configDB.get_entry('TACPLUS', 'global')
+        if not global_old:
+            global_new = tacplus_new.get("global")
+            self.configDB.set_entry("TACPLUS", "global", global_new)
+            log.log_info('Migrate TACPLUS global: {}'.format(global_new))
+
+    def migrate_aaa(self):
+        if not self.config_src_data or 'AAA' not in self.config_src_data:
+            return
+
+        aaa_new = self.config_src_data['AAA']
+        log.log_notice('Migrate AAA configuration')
+
+        authentication = self.configDB.get_entry('AAA', 'authentication')
+        if not authentication:
+            authentication_new = aaa_new.get("authentication")
+            self.configDB.set_entry("AAA", "authentication", authentication_new)
+            log.log_info('Migrate AAA authentication: {}'.format(authentication_new))
+
+        # setup per-command accounting
+        accounting = self.configDB.get_entry('AAA', 'accounting')
+        if not accounting:
+            accounting_new = aaa_new.get("accounting")
+            self.configDB.set_entry("AAA", "accounting", accounting_new)
+            log.log_info('Migrate AAA accounting: {}'.format(accounting_new))
+
     def version_unknown(self):
         """
         version_unknown tracks all SONiC versions that doesn't have a version
@@ -1235,6 +1267,9 @@ def common_migration_ops(self):
         # update FRR config mode based on minigraph parser on target image
         self.migrate_routing_config_mode()

+        self.migrate_tacplus()
+        self.migrate_aaa()
+
     def migrate(self):
         version = self.get_version()
         log.log_info('Upgrading from version ' + version)
@@ -1277,7 +1312,14 @@ def main():
         socket_path = args.socket
         namespace = args.namespace

-    load_db_config()
+    # Can't load global config based on the result of is_multi_asic(), because on a multi-asic
+    # device, when db_migrator.py runs against the local database, the ASIC instance will not
+    # yet have created /var/run/redis0/sonic-db/database-config.json
+    if args.namespace is not None:
+        if not SonicDBConfig.isGlobalInit():
+            SonicDBConfig.initializeGlobalConfig()
+    else:
+        if not SonicDBConfig.isInit():
+            SonicDBConfig.initialize()

     if socket_path:
         dbmgtr = DBMigrator(namespace, socket=socket_path)
diff --git a/scripts/dualtor_neighbor_check.py b/scripts/dualtor_neighbor_check.py
index 39de3c676f9..5ceb327c4c2 100755
--- a/scripts/dualtor_neighbor_check.py
+++ b/scripts/dualtor_neighbor_check.py
@@ -304,12 +304,21 @@ def read_tables_from_db(appl_db):
     """Reads required tables from db."""
     # NOTE: let's cache the db read script sha1 in APPL_DB under
     # key "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1"
-    db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY)
-    if not db_read_script_sha1:
+    def _load_script():
         redis_load_cmd = "SCRIPT LOAD \"%s\"" % DB_READ_SCRIPT
         db_read_script_sha1 = redis_cli(redis_load_cmd).strip()
         WRITE_LOG_INFO("loaded script sha1: %s", db_read_script_sha1)
         appl_db.set(DB_READ_SCRIPT_CONFIG_DB_KEY, db_read_script_sha1)
+        return db_read_script_sha1
+
+    def _is_script_existed(script_sha1):
+        redis_script_exists_cmd = "SCRIPT EXISTS %s" % script_sha1
+        cmd_output = redis_cli(redis_script_exists_cmd).strip()
+        return "1" in cmd_output
+
+    db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY)
+    if ((not db_read_script_sha1) or (not _is_script_existed(db_read_script_sha1))):
+        db_read_script_sha1 = _load_script()

     redis_run_cmd = "EVALSHA %s 0" % db_read_script_sha1
     result = redis_cli(redis_run_cmd).strip()
diff --git a/scripts/fabricstat b/scripts/fabricstat
index 205e3170bc8..cf3d14bf5e9 100755
--- a/scripts/fabricstat
+++ b/scripts/fabricstat
@@ -1,19 +1,21 @@
 #!/usr/bin/env python3

 import argparse
-from collections import OrderedDict, namedtuple
+import click
 import json
 import os
 import sys
+import utilities_common.multi_asic as multi_asic_util

-from utilities_common import constants
-from utilities_common.cli import json_serial, UserCache
-from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate
+from collections import OrderedDict, namedtuple
+from datetime import datetime, timezone, timedelta
 from natsort import natsorted
-from tabulate import tabulate
 from sonic_py_common import multi_asic
 from swsscommon.swsscommon import APP_FABRIC_PORT_TABLE_NAME, COUNTERS_TABLE, COUNTERS_FABRIC_PORT_NAME_MAP, COUNTERS_FABRIC_QUEUE_NAME_MAP
-import utilities_common.multi_asic as multi_asic_util
+from tabulate import tabulate
+from utilities_common import constants
+from utilities_common.cli import json_serial, UserCache
+from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate

 # mock the redis for unit test purposes #
 try:
@@ -280,6 +282,62 @@ class FabricQueueStat(FabricStat):
         print(tabulate(table, queuestat_header, tablefmt='simple', stralign='right'))
         print()

+class FabricCapacity(FabricStat):
+    def __init__(self, namespace, table_cnt, threshold):
+        self.db = None
+        self.namespace = namespace
+        self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace)
+        self.table_cnt = table_cnt
+        self.threshold = threshold
+
+    def capacity_print(self):
+        # Connect to database
+        self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace)
+        # Get fabric capacity data from STATE_DB table FABRIC_CAPACITY_TABLE
+        # and store them in fabric_capacity_data
+        fabric_capacity_data = self.db.get_all(self.db.STATE_DB, "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA")
+        operational_fap_capacity = 0
+        operational_fabric_capacity = 0
+        operational_fabric_links = 0
+        total_fabric_links = 0
+        ratio = 0
+        last_event = "None"
+        last_time = "Never"
+
+        # Get data from fabric_capacity_data
+        if "fabric_capacity" in fabric_capacity_data:
+            operational_fabric_capacity = int(fabric_capacity_data['fabric_capacity'])
+            operational_fabric_capacity = operational_fabric_capacity/1000.0
+        if "number_of_links" in fabric_capacity_data:
+            total_fabric_links = int(fabric_capacity_data['number_of_links'])
+        if "operating_links" in fabric_capacity_data:
+            operational_fabric_links = int(fabric_capacity_data['operating_links'])
+        if "warning_threshold" in fabric_capacity_data:
+            th = fabric_capacity_data['warning_threshold']
+            th = th + "%"
+            self.threshold.append(th)
+        if "last_event" in fabric_capacity_data:
+            last_event = fabric_capacity_data['last_event']
+        if "last_event_time" in fabric_capacity_data:
+            last_time = fabric_capacity_data['last_event_time']
+
+        # Calculate the ratio of number of operational links and all links
+        if total_fabric_links > 0:
+            ratio = operational_fabric_links/total_fabric_links*100
+
+        if last_time != "Never":
+            dt = datetime.fromtimestamp(int(last_time), timezone.utc)
+            td = datetime.now(timezone.utc) - dt
+            td_without_ms = timedelta(seconds=td.seconds)
+            last_time = str(td_without_ms) + " ago"
+
+        asic_name = "asic0"
+        if self.namespace:
+            asic_name = self.namespace
+
+        # Update the table to print
+        self.table_cnt.append((asic_name, operational_fabric_links, total_fabric_links, ratio, last_event, last_time))
+
 class FabricReachability(FabricStat):
     def reachability_print(self):
         # Connect to database
@@ -307,6 +365,40 @@ class FabricReachability(FabricStat):
                 print(tabulate(body, header, tablefmt='simple', stralign='right'))
         return

+class FabricIsolation(FabricStat):
+    def isolation_print(self):
+        # Connect to database
+        self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace)
+        # Get the set of all fabric ports
+        port_keys = self.db.keys(self.db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
+        # Create a new dictionary. The keys are the local port values in integer format.
+        # Only fabric ports that have remote port data are added.
+        port_dict = {}
+        for port_key in port_keys:
+            port_data = self.db.get_all(self.db.STATE_DB, port_key)
+            if "REMOTE_PORT" in port_data:
+                port_number = int(port_key.replace("FABRIC_PORT_TABLE|PORT", ""))
+                port_dict.update({port_number: port_data})
+        # Create ordered table of fabric ports.
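+        # Table columns: local link number, the auto-isolation flag, the
+        # config (manual) isolation flag, and the port's effective isolation state.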
+        header = ["Local Link", "Auto Isolated", "Manual Isolated", "Isolated"]
+        auto_isolated = 0
+        manual_isolated = 0
+        isolated = 0
+        body = []
+        for port_number in sorted(port_dict.keys()):
+            port_data = port_dict[port_number]
+            if "AUTO_ISOLATED" in port_data:
+                auto_isolated = port_data["AUTO_ISOLATED"]
+            if "CONFIG_ISOLATED" in port_data:
+                manual_isolated = port_data["CONFIG_ISOLATED"]
+            if "ISOLATED" in port_data:
+                isolated = port_data["ISOLATED"]
+            body.append((port_number, auto_isolated, manual_isolated, isolated))
+        if self.namespace:
+            print(f"\n{self.namespace}")
+        print(tabulate(body, header, tablefmt='simple', stralign='right'))
+        return
+
 def main():
     global cnstat_dir
     global cnstat_fqn_file_port
@@ -321,6 +413,8 @@ Examples:
     fabricstat -p -n asic0 -e
     fabricstat -q
     fabricstat -q -n asic0
+    fabricstat -c
+    fabricstat -c -n asic0
     fabricstat -C
     fabricstat -D
 """)
@@ -329,12 +423,16 @@ Examples:
     parser.add_argument('-r','--reachability', action='store_true', help='Display reachability, otherwise port stat')
     parser.add_argument('-n','--namespace', default=None, help='Display fabric ports counters for specific namespace')
     parser.add_argument('-e', '--errors', action='store_true', help='Display errors')
+    parser.add_argument('-c','--capacity',action='store_true', help='Display fabric capacity')
+    parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status')
     parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters')
     parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats')

     args = parser.parse_args()
     queue = args.queue
     reachability = args.reachability
+    capacity_status = args.capacity
+    isolation_status = args.isolation
     namespace = args.namespace
     errors_only = args.errors
@@ -362,6 +460,10 @@ Examples:
             stat = FabricReachability(ns)
             stat.reachability_print()
             return
+        elif isolation_status:
+            stat = FabricIsolation(ns)
+            stat.isolation_print()
+            return
         else:
             stat = FabricPortStat(ns)
             cnstat_dict = stat.get_cnstat_dict()
@@ -370,14 +472,36 @@ Examples:
             else:
                 stat.cnstat_print(cnstat_dict, errors_only)

-    if namespace is None:
-        # All asics or all fabric asics
-        multi_asic = multi_asic_util.MultiAsic()
-        for ns in multi_asic.get_ns_list_based_on_options():
-            nsStat(ns, errors_only)
+    if capacity_status:
+        # show fabric capacity command
+        capacity_header = []
+        table_cnt = []
+        threshold = []
+        capacity_header = ["ASIC", "Operating\nLinks", "Total #\nof Links", "%", "Last Event", "Last Time"]
+        if namespace is None:
+            # All asics or all fabric asics
+            multi_asic = multi_asic_util.MultiAsic()
+            for ns in multi_asic.get_ns_list_based_on_options():
+                stat = FabricCapacity(ns, table_cnt, threshold)
+                stat.capacity_print()
+        else:
+            # Asic with namespace
+            stat = FabricCapacity(namespace, table_cnt, threshold)
+            stat.capacity_print()
+
+        click.echo("Monitored fabric capacity threshold: {}".format(threshold[0]))
+        click.echo()
+        click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right'))
     else:
-        # Asic with namespace
-        nsStat(namespace, errors_only)
+        # other show fabric commands
+        if namespace is None:
+            # All asics or all fabric asics
+            multi_asic = multi_asic_util.MultiAsic()
+            for ns in multi_asic.get_ns_list_based_on_options():
+                nsStat(ns, errors_only)
+        else:
+            # Asic with namespace
+            nsStat(namespace, errors_only)

 if __name__ == "__main__":
     main()
diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 922d217e3fa..91791b37714 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -54,6 +54,7 @@ EXIT_TEAMD_RETRY_COUNT_FAILURE=23
 function error()
 {
     echo $@ >&2
+    logger -p user.err "Error seen during warm-reboot shutdown process: $@"
 }

 function debug()
@@ -251,6 +252,7 @@ function backup_database()
             and not string.match(k, 'MIRROR_SESSION_TABLE|') \
             and not string.match(k, 'FG_ROUTE_TABLE|') \
             and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \
+            and not string.match(k, 'TRANSCEIVER_INFO|') \
             and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \
             and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \
             and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then
diff --git a/scripts/generate_dump b/scripts/generate_dump
index 64a8917252d..9dcb62afe67 100755
--- a/scripts/generate_dump
+++ b/scripts/generate_dump
@@ -1251,6 +1251,7 @@ collect_mellanox() {
         echo "HW Mgmt dump script $HW_DUMP_FILE does not exist"
     fi

+    save_cmd "get_component_versions.py" "component_versions"
 }

 ###############################################################################
@@ -1266,8 +1267,19 @@ collect_mellanox_dfw_dumps() {
     trap 'handle_error $? $LINENO' ERR
     local platform=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_platform())")
     local hwsku=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_hwsku())")
+    local def_dump_path="/var/log/mellanox/sdk-dumps"
     local sdk_dump_path=`cat /usr/share/sonic/device/${platform}/${hwsku}/sai.profile|grep "SAI_DUMP_STORE_PATH"|cut -d = -f2`

+    if [ -z $sdk_dump_path ]; then
+        # If the SAI_DUMP_STORE_PATH is not found in device specific sai profile, check in common sai profile
+        sdk_dump_path=`docker exec syncd cat /etc/mlnx/sai-common.profile | grep "SAI_DUMP_STORE_PATH" |cut -d = -f2`
+        if [ -z $sdk_dump_path ]; then
+            # If the above two mechanisms fail, e.g. when syncd is not running, fall back to the default sdk dump path
+            sdk_dump_path=$def_dump_path
+        fi
+    fi
+
     if [[ ! -d $sdk_dump_path ]]; then
         # This would mean the SAI_DUMP_STORE_PATH is not mounted on the host and is only accessible though the container
         # This is a bad design and not recommended But there is nothing which restricts against it and thus the special handling
@@ -1553,6 +1565,27 @@ collect_cisco_8000() {
         echo "'/usr/share/sonic/device/${platform}' does not exist" > /tmp/error
         save_file /tmp/error sai false
     fi
+
+    save_cmd "show platform versions" "platform.versions"
+
+    # run 'hw-management-generate-dump.sh' script and save the result file
+    HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh
+    if [[ -x $HW_DUMP_FILE ]]; then
+        ${CMD_PREFIX} $HW_DUMP_FILE $ALLOW_PROCESS_STOP
+        ret=$?
+        if [[ $ret -ne 0 ]]; then
+            if [[ $ret -eq $TIMEOUT_EXIT_CODE ]]; then
+                echo "hw-management dump timed out after ${TIMEOUT_MIN} minutes."
+            else
+                echo "hw-management dump failed ..."
+            fi
+        else
+            save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false
+            rm -f /tmp/hw-mgmt-dump*
+        fi
+    else
+        echo "HW Mgmt dump script $HW_DUMP_FILE does not exist"
+    fi
 }

 ##############################################################################
diff --git a/scripts/intfutil b/scripts/intfutil
index eb40a491869..69472760d85 100755
--- a/scripts/intfutil
+++ b/scripts/intfutil
@@ -29,6 +29,7 @@ from utilities_common import multi_asic as multi_asic_util
 from utilities_common.intf_filter import parse_interface_in_filter
 from utilities_common.platform_sfputil_helper import is_rj45_port, RJ45_PORT_TYPE
 from sonic_py_common.interface import get_intf_longname
+from sonic_py_common import multi_asic

 # ========================== Common interface-utils logic ==========================
@@ -53,6 +54,7 @@ PORT_INTERFACE_TYPE = 'interface_type'
 PORT_ADV_INTERFACE_TYPES = 'adv_interface_types'
 PORT_TPID = "tpid"
 OPTICS_TYPE_RJ45 = RJ45_PORT_TYPE
+TYPE_DPC = 'DPU-NPU Data Port'
 PORT_LINK_TRAINING = 'link_training'
 PORT_LINK_TRAINING_STATUS = 'link_training_status'
@@ -214,15 +216,17 @@ def port_oper_speed_get_raw(db, intf_name):
     speed = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_SPEED)
     return speed

-def port_optics_get(state_db, intf_name, type):
+def port_optics_get(db, intf_name, type):
     """
         Get optic type info for port
     """
     full_table_id = PORT_TRANSCEIVER_TABLE_PREFIX + intf_name
-    optics_type = state_db.get(state_db.STATE_DB, full_table_id, type)
+    optics_type = db.get(db.STATE_DB, full_table_id, type)
     if optics_type is None:
         if is_rj45_port(intf_name):
             return OPTICS_TYPE_RJ45
+        elif db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, multi_asic.PORT_ROLE) == multi_asic.DPU_CONNECT_PORT:
+            return TYPE_DPC
         else:
             return "N/A"
     return optics_type
diff --git a/scripts/ipintutil b/scripts/ipintutil
index 5535bce7e7f..85879972e07 100755
--- a/scripts/ipintutil
+++ b/scripts/ipintutil
@@ -29,6 +29,7 @@ try:
         mock_tables.dbconnector.load_namespace_config()
     else:
         import mock_tables.mock_single_asic
+        mock_tables.mock_single_asic.add_unknown_intf = True
 except KeyError:
     pass
@@ -150,7 +151,10 @@ def get_ip_intfs_in_namespace(af, namespace, display):
         ip_intf_attr = []
         if namespace != constants.DEFAULT_NAMESPACE and skip_ip_intf_display(iface, display):
             continue
-        ipaddresses = multi_asic_util.multi_asic_get_ip_intf_addr_from_ns(namespace, iface)
+        try:
+            ipaddresses = multi_asic_util.multi_asic_get_ip_intf_addr_from_ns(namespace, iface)
+        except ValueError:
+            continue
         if af in ipaddresses:
             ifaddresses = []
             bgp_neighs = {}
diff --git a/scripts/mellanox_buffer_migrator.py b/scripts/mellanox_buffer_migrator.py
index a01e8665f5b..4ffc900ddd9 100755
--- a/scripts/mellanox_buffer_migrator.py
+++ b/scripts/mellanox_buffer_migrator.py
@@ -108,10 +108,9 @@ def __init__(self, configDB, appDB, stateDB):
         self.spc2_platforms = ["x86_64-mlnx_msn3700-r0", "x86_64-mlnx_msn3700c-r0"]
         self.spc3_platforms = ["x86_64-mlnx_msn4600-r0", "x86_64-mlnx_msn4600c-r0", "x86_64-mlnx_msn4700-r0"]

-        msftskus = ["Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D48C8", "Mellanox-SN2700-D40C8S8",
-                    "Mellanox-SN3800-C64", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D112C8", "Mellanox-SN3800-D28C50"]
+        dynamic_model_skus = ["Mellanox-SN5600-O128"]

-        self.is_msft_sku = self.sku in msftskus
+        self.is_default_traditional_model = self.sku and self.sku.startswith("Mellanox-") and self.sku not in dynamic_model_skus

         self.pending_update_items = list()
         self.default_speed_list = ['1000', '10000', '25000', '40000', '50000', '100000', '200000', '400000']
@@ -822,7 +821,7 @@ def mlnx_flush_new_buffer_configuration(self):
         if not self.ready:
             return True

-        if not self.is_buffer_config_default and not self.is_buffer_config_empty or self.is_msft_sku:
+        if not self.is_buffer_config_default and not self.is_buffer_config_empty or self.is_default_traditional_model:
             log.log_notice("No item pending to be updated")
             metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
             metadata['buffer_model'] = 'traditional'
@@ -840,7 +839,7 @@ def mlnx_flush_new_buffer_configuration(self):
         return True

     def mlnx_is_buffer_model_dynamic(self):
-        return self.is_buffer_config_default and not self.is_msft_sku
+        return self.is_buffer_config_default and not self.is_default_traditional_model

     def mlnx_reorganize_buffer_tables(self, buffer_table, name):
         """
diff --git a/scripts/port2alias b/scripts/port2alias
index 5a4ff8dd79f..c993890699a 100755
--- a/scripts/port2alias
+++ b/scripts/port2alias
@@ -7,6 +7,7 @@ from io import StringIO

 from portconfig import get_port_config
 from sonic_py_common import device_info
 from sonic_py_common import multi_asic
+from utilities_common.general import load_db_config

 # mock the redis for unit test purposes #
 try:
@@ -50,6 +51,7 @@ def translate_line(line, ports):
 def main():
     (platform, hwsku) = device_info.get_platform_and_hwsku()
     ports = {}
+    load_db_config()
     for ns in multi_asic.get_namespace_list():
         (ports_ns, _, _) = get_port_config(hwsku=hwsku, platform=platform, asic_name=ns)
         ports.update(ports_ns)
diff --git a/scripts/reboot b/scripts/reboot
index 2d1cd8a87c9..b5b6a7a585a 100755
--- a/scripts/reboot
+++ b/scripts/reboot
@@ -1,4 +1,10 @@
 #!/bin/bash
+
+declare -r EXIT_SUCCESS=0
+declare -r EXIT_ERROR=1
+declare -r WATCHDOG_UTIL="/usr/local/bin/watchdogutil"
+declare -r PRE_REBOOT_HOOK="pre_reboot_hook"
+
 DEVPATH="/usr/share/sonic/device"
 PLAT_REBOOT="platform_reboot"
 PLATFORM_UPDATE_REBOOT_CAUSE="platform_update_reboot_cause"
@@ -34,6 +40,8 @@ PLATFORM_FWUTIL_AU_REBOOT_HANDLE="platform_fw_au_reboot_handle"
 REBOOT_SCRIPT_NAME=$(basename $0)
 REBOOT_TYPE="${REBOOT_SCRIPT_NAME}"
 TAG_LATEST=no
+REBOOT_FLAGS=""
+FORCE_REBOOT="no"

 function debug()
 {
@@ -121,9 +129,8 @@ function show_help_and_exit()
     echo " "
     echo " Available options:"
     echo "    -h, -? : getting this help"
-    echo "    -f     : execute reboot force"
+    echo "    -f     : force reboot, ignoring pre-reboot hook failures"

-    exit 0
+    exit ${EXIT_SUCCESS}
 }

 function setup_reboot_variables()
@@ -166,13 +173,13 @@ function check_conflict_boot_in_fw_update()
     FW_AU_TASK_FILE=$(compgen -G ${FW_AU_TASK_FILE_REGEX}) || true
     if [[ -n "${FW_AU_TASK_FILE}" ]] && [[ ! -f "${FW_AU_TASK_FILE_EXP}" ]]; then
         VERBOSE=yes debug "Firmware auto update scheduled for a different reboot: ${FW_AU_TASK_FILE}"
-        exit 1
+        exit ${EXIT_ERROR}
     fi
 }

 function parse_options()
 {
     while getopts "h?vf" opt; do
         case ${opt} in
             h|\? )
                 show_help_and_exit
@@ -183,6 +190,10 @@ function parse_options()
             t )
                 TAG_LATEST=no
                 ;;
+            f )
+                REBOOT_FLAGS+=" -f"
+                FORCE_REBOOT="yes"
+                ;;
         esac
     done
 }
@@ -192,7 +203,7 @@ parse_options $@
 # Exit if not superuser
 if [[ "$EUID" -ne 0 ]]; then
     echo "This command must be run as root" >&2
-    exit 1
+    exit ${EXIT_ERROR}
 fi

 debug "User requested rebooting device ..."
@@ -242,6 +253,23 @@ if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_UPDATE_REBOOT_CAUSE} ]; then
     ${DEVPATH}/${PLATFORM}/${PLATFORM_UPDATE_REBOOT_CAUSE}
 fi

+if [ -x ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} ]; then
+    debug "Executing the pre-reboot script"
+    ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK}
+    EXIT_CODE=$?
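+    # A non-zero hook exit aborts the reboot unless -f (force) was supplied.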
+ if [[ ${EXIT_CODE} != ${EXIT_SUCCESS} ]]; then + if [[ "${FORCE_REBOOT}" != "yes" ]]; then + echo "Reboot is interrupted: use -f (force) to override" + exit ${EXIT_ERROR} + fi + fi +fi + +if [ -x ${WATCHDOG_UTIL} ]; then + debug "Enabling the Watchdog before reboot" + ${WATCHDOG_UTIL} arm +fi + if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then VERBOSE=yes debug "Rebooting with platform ${PLATFORM} specific tool ..." ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} $@ @@ -260,4 +288,4 @@ if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then fi VERBOSE=yes debug "Issuing OS-level reboot ..." >&2 -exec /sbin/reboot $@ +exec /sbin/reboot ${REBOOT_FLAGS} diff --git a/scripts/route_check.py b/scripts/route_check.py index 4346d733fc0..ee417dc49cc 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -50,6 +50,8 @@ from ipaddress import ip_network from swsscommon import swsscommon from utilities_common import chassis +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config APPL_DB_NAME = 'APPL_DB' ASIC_DB_NAME = 'ASIC_DB' @@ -76,6 +78,8 @@ FRR_CHECK_RETRIES = 3 FRR_WAIT_TIME = 15 +REDIS_TIMEOUT_MSECS = 0 + class Level(Enum): ERR = 'ERR' INFO = 'INFO' @@ -276,12 +280,12 @@ def is_vrf(k): return k.startswith("Vrf") -def get_routes(): +def get_appdb_routes(namespace): """ helper to read route table from APPL-DB. :return list of sorted routes with prefix ensured """ - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) print_message(syslog.LOG_DEBUG, "APPL DB connected for routes") tbl = swsscommon.Table(db, 'ROUTE_TABLE') keys = tbl.getKeys() @@ -298,15 +302,15 @@ def get_routes(): return sorted(valid_rt) -def get_route_entries(): +def get_asicdb_routes(namespace): """ helper to read present route entries from ASIC-DB and as well initiate selector for ASIC-DB:ASIC-state updates. 
:return (selector, subscriber, )
     """
-    db = swsscommon.DBConnector(ASIC_DB_NAME, 0)
+    db = swsscommon.DBConnector(ASIC_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace)
     subs = swsscommon.SubscriberStateTable(db, ASIC_TABLE_NAME)
-    print_message(syslog.LOG_DEBUG, "ASIC DB connected")
+    print_message(syslog.LOG_DEBUG, "ASIC DB {} connected".format(namespace))

     rt = []
     while True:
@@ -324,37 +328,42 @@ def get_asicdb_routes(namespace):
     return (selector, subs, sorted(rt))

-def is_suppress_fib_pending_enabled():
+def is_suppress_fib_pending_enabled(namespace):
     """
     Returns True if FIB suppression is enabled, False otherwise
     """
-    cfg_db = swsscommon.ConfigDBConnector()
-    cfg_db.connect()
-
+    cfg_db = multi_asic.connect_config_db_for_ns(namespace)
     state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending')

     return state == 'enabled'

-def get_frr_routes():
+def get_frr_routes(namespace):
     """
     Read routes from zebra through CLI command
     :return frr routes dictionary
     """
+    if namespace == multi_asic.DEFAULT_NAMESPACE:
+        v4_route_cmd = ['show', 'ip', 'route', 'json']
+        v6_route_cmd = ['show', 'ipv6', 'route', 'json']
+    else:
+        v4_route_cmd = ['show', 'ip', 'route', '-n', namespace, 'json']
+        v6_route_cmd = ['show', 'ipv6', 'route', '-n', namespace, 'json']

-    output = subprocess.check_output('show ip route json', shell=True)
+    output = subprocess.check_output(v4_route_cmd, text=True)
     routes = json.loads(output)
-    output = subprocess.check_output('show ipv6 route json', shell=True)
+    output = subprocess.check_output(v6_route_cmd, text=True)
     routes.update(json.loads(output))
+    print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, routes))
     return routes

-def get_interfaces():
+def get_interfaces(namespace):
     """
     helper to read interface table from APPL-DB.
     :return sorted list of IP addresses with added prefix
     """
-    db = swsscommon.DBConnector(APPL_DB_NAME, 0)
+    db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace)
     print_message(syslog.LOG_DEBUG, "APPL DB connected for interfaces")
     tbl = swsscommon.Table(db, 'INTF_TABLE')
     keys = tbl.getKeys()
@@ -374,20 +383,20 @@ def get_interfaces(namespace):
     return sorted(intf)

-def filter_out_local_interfaces(keys):
+def filter_out_local_interfaces(namespace, keys):
     """
     helper to filter out local interfaces

     :param keys: APPL-DB:ROUTE_TABLE Routes to check.
     :return keys filtered out of local
     """
     rt = []
-    local_if_lst = {'eth0', 'docker0'}
+    local_if_lst = {'eth0', 'eth1', 'docker0'}  # eth1 is added to skip routes installed in APPL_DB on packet-chassis
     local_if_lo = [r'tun0', r'lo', r'Loopback\d+']

     chassis_local_intfs = chassis.get_chassis_local_interfaces()
     local_if_lst.update(set(chassis_local_intfs))

-    db = swsscommon.DBConnector(APPL_DB_NAME, 0)
+    db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace)
     tbl = swsscommon.Table(db, 'ROUTE_TABLE')

     for k in keys:
@@ -407,20 +416,20 @@ def filter_out_local_interfaces(namespace, keys):
     return rt

-def filter_out_voq_neigh_routes(keys):
+def filter_out_voq_neigh_routes(namespace, keys):
     """
     helper to filter out voq neigh routes. These are the routes
     statically added for the voq neighbors. We skip writing route entries
     in asic db for these. We filter out reporting error on all the host
     routes written on inband interface prefixed with "Ethernte-IB"
-    :param keys: APPL-DB:ROUTE_TABLE Routes to check.
+    :param namespace: ASIC namespace
+    :param keys: APPL-DB:ROUTE_TABLE Routes to check.
:return keys filtered out for voq neigh routes
     """
     rt = []
     local_if_re = [r'Ethernet-IB\d+']

-    db = swsscommon.DBConnector(APPL_DB_NAME, 0)
+    db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace)
     tbl = swsscommon.Table(db, 'ROUTE_TABLE')

     for k in keys:
@@ -452,13 +461,13 @@ def filter_out_default_routes(lst):
     return upd

-def filter_out_vnet_routes(routes):
+def filter_out_vnet_routes(namespace, routes):
     """
     Helper to filter out VNET routes
     :param routes: list of routes to filter
     :return filtered list of routes.
     """
-    db = swsscommon.DBConnector('APPL_DB', 0)
+    db = swsscommon.DBConnector('APPL_DB', REDIS_TIMEOUT_MSECS, True, namespace)

     vnet_route_table = swsscommon.Table(db, 'VNET_ROUTE_TABLE')
     vnet_route_tunnel_table = swsscommon.Table(db, 'VNET_ROUTE_TUNNEL_TABLE')
@@ -488,14 +497,14 @@ def is_dualtor(config_db):
     return subtype.lower() == 'dualtor'

-def filter_out_standalone_tunnel_routes(routes):
-    config_db = swsscommon.ConfigDBConnector()
-    config_db.connect()
+def filter_out_standalone_tunnel_routes(namespace, routes):
+
+    config_db = multi_asic.connect_config_db_for_ns(namespace)

     if not is_dualtor(config_db):
         return routes

-    app_db = swsscommon.DBConnector('APPL_DB', 0)
+    app_db = swsscommon.DBConnector('APPL_DB', REDIS_TIMEOUT_MSECS, True, namespace)
     neigh_table = swsscommon.Table(app_db, 'NEIGH_TABLE')
     neigh_keys = neigh_table.getKeys()
     standalone_tunnel_route_ips = []
@@ -524,19 +533,30 @@ def filter_out_standalone_tunnel_routes(namespace, routes):
     return updated_routes

+def is_feature_bgp_enabled(namespace):
+    """
+    Check if bgp feature is enabled or disabled.
+    Return True if enabled else False.
+    """
+    cfg_db = multi_asic.connect_config_db_for_ns(namespace)
+    feature_table = cfg_db.get_table("FEATURE")
+    bgp_enabled = False
+    if 'bgp' in feature_table:
+        if feature_table['bgp']["state"] == "enabled":
+            bgp_enabled = True
+    return bgp_enabled

-def check_frr_pending_routes():
+def check_frr_pending_routes(namespace):
     """
     Check FRR routes for offload flag presence by executing "show ip route json"
     Returns a list of routes that have no offload flag.
     """
     missed_rt = []
-
     retries = FRR_CHECK_RETRIES
     for i in range(retries):
         missed_rt = []
-        frr_routes = get_frr_routes()
+        frr_routes = get_frr_routes(namespace)

         for _, entries in frr_routes.items():
             for entry in entries:
@@ -559,11 +579,11 @@ def check_frr_pending_routes(namespace):
             break

         time.sleep(FRR_WAIT_TIME)
-
+    print_message(syslog.LOG_DEBUG, "FRR missed routes: {}".format(missed_rt))
     return missed_rt

-def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl):
+def mitigate_installed_not_offloaded_frr_routes(namespace, missed_frr_rt, rt_appl):
     """
     Mitigate installed but not offloaded FRR routes.
@@ -575,7 +595,7 @@ def mitigate_installed_not_offloaded_frr_routes(namespace, missed_frr_rt, rt_appl):
     All of the above mentioned cases must be considered as a bug, but even in that case
     we will report an error in the log but given that this script ensures the route is installed
     in the hardware it will automitigate such a bug.
""" - db = swsscommon.DBConnector('APPL_STATE_DB', 0) + db = swsscommon.DBConnector('APPL_STATE_DB', REDIS_TIMEOUT_MSECS, True, namespace) response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL') for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]: fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])]) @@ -598,7 +618,7 @@ def get_soc_ips(config_db): return soc_ips -def filter_out_soc_ip_routes(routes): +def filter_out_soc_ip_routes(namespace, routes): """ Ignore ASIC only routes for SOC IPs @@ -608,8 +628,7 @@ def filter_out_soc_ip_routes(routes): will use the kernel routing table), but still provide connectivity to any external traffic in case of a link issue (since this traffic will be forwarded by the ASIC). """ - config_db = swsscommon.ConfigDBConnector() - config_db.connect() + config_db = multi_asic.connect_config_db_for_ns(namespace) if not is_dualtor(config_db): return routes @@ -618,7 +637,7 @@ def filter_out_soc_ip_routes(routes): if not soc_ips: return routes - + updated_routes = [] for route in routes: if route not in soc_ips: @@ -627,9 +646,9 @@ def filter_out_soc_ip_routes(routes): return updated_routes -def get_vlan_neighbors(): +def get_vlan_neighbors(namespace): """Return a list of VLAN neighbors.""" - db = swsscommon.DBConnector(APPL_DB_NAME, 0) + db = swsscommon.DBConnector(APPL_DB_NAME, REDIS_TIMEOUT_MSECS, True, namespace) print_message(syslog.LOG_DEBUG, "APPL DB connected for neighbors") tbl = swsscommon.Table(db, 'NEIGH_TABLE') neigh_entries = tbl.getKeys() @@ -645,7 +664,7 @@ def get_vlan_neighbors(): return valid_neighs -def filter_out_vlan_neigh_route_miss(rt_appl_miss, rt_asic_miss): +def filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss): """Ignore any route miss for vlan neighbor IPs.""" def _filter_out_neigh_route(routes, neighs): @@ -658,12 +677,10 @@ def _filter_out_neigh_route(routes, neighs): updated_routes.append(route) return updated_routes, ignored_routes - config_db = swsscommon.ConfigDBConnector() - config_db.connect() + config_db = multi_asic.connect_config_db_for_ns(namespace) - print_message(syslog.LOG_DEBUG, "Ignore vlan neighbor route miss") if is_dualtor(config_db): - vlan_neighs = set(get_vlan_neighbors()) + vlan_neighs = set(get_vlan_neighbors(namespace)) rt_appl_miss, ignored_rt_appl_miss = _filter_out_neigh_route(rt_appl_miss, vlan_neighs) print_message(syslog.LOG_DEBUG, "Ignored appl route miss:", json.dumps(ignored_rt_appl_miss, indent=4)) rt_asic_miss, ignored_rt_asic_miss = _filter_out_neigh_route(rt_asic_miss, vlan_neighs) @@ -672,7 +689,7 @@ def _filter_out_neigh_route(routes, neighs): return rt_appl_miss, rt_asic_miss -def check_routes(): +def check_routes(namespace): """ The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. @@ -691,85 +708,102 @@ def check_routes(): :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. 
""" - intf_appl_miss = [] - rt_appl_miss = [] - rt_asic_miss = [] - rt_frr_miss = [] + namespace_list = [] + if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): + namespace_list.append(namespace) + else: + namespace_list = multi_asic.get_namespace_list() + print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) results = {} - adds = [] - deletes = [] + adds = {} + deletes = {} + for namespace in namespace_list: + intf_appl_miss = [] + rt_appl_miss = [] + rt_asic_miss = [] + rt_frr_miss = [] + adds[namespace] = [] + deletes[namespace] = [] + + selector, subs, rt_asic = get_asicdb_routes(namespace) - selector, subs, rt_asic = get_route_entries() + rt_appl = get_appdb_routes(namespace) + intf_appl = get_interfaces(namespace) - rt_appl = get_routes() - intf_appl = get_interfaces() + # Diff APPL-DB routes & ASIC-DB routes + rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) - # Diff APPL-DB routes & ASIC-DB routes - rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) + # Check missed ASIC routes against APPL-DB INTF_TABLE + _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) + rt_asic_miss = filter_out_default_routes(rt_asic_miss) + rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) - # Check missed ASIC routes against APPL-DB INTF_TABLE - _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) - rt_asic_miss = filter_out_default_routes(rt_asic_miss) - rt_asic_miss = filter_out_vnet_routes(rt_asic_miss) - rt_asic_miss = filter_out_standalone_tunnel_routes(rt_asic_miss) - rt_asic_miss = filter_out_soc_ip_routes(rt_asic_miss) - # Check APPL-DB INTF_TABLE with ASIC table route entries - intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) + # Check APPL-DB INTF_TABLE with ASIC table route entries + intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) - if rt_appl_miss: - rt_appl_miss = filter_out_local_interfaces(rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_voq_neigh_routes(rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) - # NOTE: On dualtor environment, ignore any route miss for the - # neighbors learned from the vlan subnet. - if rt_appl_miss or rt_asic_miss: - rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(rt_appl_miss, rt_asic_miss) + # NOTE: On dualtor environment, ignore any route miss for the + # neighbors learned from the vlan subnet. 
+ if rt_appl_miss or rt_asic_miss: + rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) - if rt_appl_miss or rt_asic_miss: - # Look for subscribe updates for a second - adds, deletes = get_subscribe_updates(selector, subs) + if rt_appl_miss or rt_asic_miss: + # Look for subscribe updates for a second + adds[namespace], deletes[namespace] = get_subscribe_updates(selector, subs) # Drop all those for which SET received - rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds) + rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds[namespace]) # Drop all those for which DEL received - rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes) + rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes[namespace]) - if rt_appl_miss: - results["missed_ROUTE_TABLE_routes"] = rt_appl_miss + if rt_appl_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_ROUTE_TABLE_routes"] = rt_appl_miss - if intf_appl_miss: - results["missed_INTF_TABLE_entries"] = intf_appl_miss + if intf_appl_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_INTF_TABLE_entries"] = intf_appl_miss - if rt_asic_miss: - results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + if rt_asic_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - rt_frr_miss = check_frr_pending_routes() + rt_frr_miss = check_frr_pending_routes(namespace) - if rt_frr_miss: - results["missed_FRR_routes"] = rt_frr_miss + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss + + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above")
         print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4))
         print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4))
-
-        if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
-            print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync")
-            if is_suppress_fib_pending_enabled():
-                mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl)
-
         return -1, results
     else:
         print_message(syslog.LOG_INFO, "All good!")
         return 0, None

-
 def main():
     """
     main entry point, which mainly parses the args and call check_routes
@@ -782,8 +816,18 @@ def main():
     parser.add_argument('-m', "--mode", type=Level, choices=list(Level), default='ERR')
     parser.add_argument("-i", "--interval", type=int, default=0, help="Scan interval in seconds")
     parser.add_argument("-s", "--log_to_syslog", action="store_true", default=True, help="Write message to syslog")
+    parser.add_argument('-n','--namespace', default=multi_asic.DEFAULT_NAMESPACE, help='Verify routes for this specific namespace')
     args = parser.parse_args()

+    namespace = args.namespace
+    if namespace != multi_asic.DEFAULT_NAMESPACE and not multi_asic.is_multi_asic():
+        print_message(syslog.LOG_ERR, "Namespace option is not valid for a single-ASIC device")
+        return -1, None
+
+    if namespace != multi_asic.DEFAULT_NAMESPACE and namespace not in multi_asic.get_namespace_list():
+        print_message(syslog.LOG_ERR, "Namespace option is not valid. Choose one of {}".format(multi_asic.get_namespace_list()))
+        return -1, None
+
     set_level(args.mode, args.log_to_syslog)

     if args.interval:
@@ -797,10 +841,16 @@ def main():
         interval = 1

     signal.signal(signal.SIGALRM, handler)
+    load_db_config()
+
+    if not is_feature_bgp_enabled(namespace):
+        print_message(syslog.LOG_INFO, "BGP feature is disabled, exiting without checking routes!!")
+        return 0, None

     while True:
         signal.alarm(TIMEOUT_SECONDS)
-        ret, res= check_routes()
+        ret, res = check_routes(namespace)
+        print_message(syslog.LOG_DEBUG, "ret={}, res={}".format(ret, res))
         signal.alarm(0)

         if interval:
diff --git a/scripts/route_check_test.sh b/scripts/route_check_test.sh
index 989cbfae0bf..b78351f7a67 100755
--- a/scripts/route_check_test.sh
+++ b/scripts/route_check_test.sh
@@ -2,36 +2,95 @@
 # add a route, interface & route-entry to simulate error
 #
-sonic-db-cli APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null
-sonic-db-cli ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null
-sonic-db-cli APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null
-
-echo "------"
-echo "expect errors!"
-echo "Running Route Check..."
-./route_check.py
-echo "return value: $?"
- -sonic-db-cli APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null -sonic-db-cli ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null -sonic-db-cli APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null - -# add standalone tunnel route to simulate unreachable neighbor scenario on dual ToR -# in this scenario, we expect the route mismatch to be ignored -sonic-db-cli APPL_DB hmset "NEIGH_TABLE:Vlan1000:fc02:1000::99" "neigh" "00:00:00:00:00:00" "family" "IPv6" > /dev/null -sonic-db-cli ASIC_DB hmset 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x400000000167d" "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" "SAI_PACKET_ACTION_FORWARD" > /dev/null - -echo "------" -echo "expect success on dualtor, expect error on all other devices!" -echo "Running Route Check..." -./route_check.py -echo "return value: $?" - -sonic-db-cli APPL_DB del "NEIGH_TABLE:Vlan1000:fc02:1000::99" > /dev/null -sonic-db-cli ASIC_DB del 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' > /dev/null - -echo "------" -echo "expect success!" -echo "Running Route Check..." -./route_check.py -echo "return value: $?" + +CONFIG_FILE="/etc/sonic/config_db.json" +if [ ! -e "$CONFIG_FILE" ]; then + echo "File $CONFIG_FILE not found, exiting." + exit 1 +fi + +# Extract the platform from DEVICE_METADATA using awk +platform=$(awk -F'"' '/"DEVICE_METADATA":/,/\}/{if(/"platform":/) print $4}' "$CONFIG_FILE") + +# Print the value +echo "Platform: $platform" + +PLATFORM_DIR="/usr/share/sonic/device/$platform" +if [ ! -d "$PLATFORM_DIR" ]; then + echo "Directory $PLATFORM_DIR not found, exiting." + exit 1 +fi + +ASIC_CONF_FILE="$PLATFORM_DIR/asic.conf" +echo "$ASIC_CONF_FILE" +num_asic=1 + +# Check if asic.conf exists +if [ -f "$ASIC_CONF_FILE" ]; then + if grep -q "^NUM_ASIC=" "$ASIC_CONF_FILE"; then + # Extract the value of NUM_ASIC into a local variable + num_asic=$(grep "^NUM_ASIC=" "$ASIC_CONF_FILE" | cut -d'=' -f2) + else + # Print a message if NUM_ASIC is not present + echo "NUM_ASIC not found, exiting." + exit 1 + fi +fi + +echo "num_asic: $num_asic" + +if [ "$num_asic" -gt 1 ]; then + # test on asic0 + # add a route, interface & route-entry to simulate error + # + sonic-db-cli -n asic0 APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null + sonic-db-cli -n asic0 ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null + sonic-db-cli -n asic0 APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null + + echo "------" + echo "expect errors!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?"
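For readers who find the awk range pattern above opaque: it scans the `DEVICE_METADATA` object of the CONFIG_DB dump and prints the fourth `"`-delimited field of the `"platform"` line. A rough Python equivalent, assuming the standard nested `config_db.json` layout (table, then key, then fields):

```python
# Rough Python equivalent of the awk-based platform extraction above; assumes
# the usual nested config_db.json layout: DEVICE_METADATA -> localhost -> platform.
import json

def get_platform(config_file="/etc/sonic/config_db.json"):
    with open(config_file) as f:
        config = json.load(f)
    return config.get("DEVICE_METADATA", {}).get("localhost", {}).get("platform")
```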
+ + sonic-db-cli -n asic0 APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null + sonic-db-cli -n asic0 ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null + sonic-db-cli -n asic0 APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null + +else + # add a route, interface & route-entry to simulate error + # + sonic-db-cli APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" > /dev/null + sonic-db-cli ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" > /dev/null + sonic-db-cli APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" > /dev/null + + echo "------" + echo "expect errors!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" + + sonic-db-cli APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" > /dev/null + sonic-db-cli ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" > /dev/null + sonic-db-cli APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" > /dev/null + + # add standalone tunnel route to simulate unreachable neighbor scenario on dual ToR + # in this scenario, we expect the route mismatch to be ignored + sonic-db-cli APPL_DB hmset "NEIGH_TABLE:Vlan1000:fc02:1000::99" "neigh" "00:00:00:00:00:00" "family" "IPv6" > /dev/null + sonic-db-cli ASIC_DB hmset 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x400000000167d" "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" "SAI_PACKET_ACTION_FORWARD" > /dev/null + + echo "------" + echo "expect success on dualtor, expect error on all other devices!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" + + sonic-db-cli APPL_DB del "NEIGH_TABLE:Vlan1000:fc02:1000::99" > /dev/null + sonic-db-cli ASIC_DB del 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{"dest":"fc02:1000::99/128","switch_id":"oid:0x21000000000000","vr":"oid:0x300000000007c"}' > /dev/null + + echo "------" + echo "expect success!" + echo "Running Route Check..." + ./route_check.py + echo "return value: $?" 
+fi diff --git a/scripts/sfpshow b/scripts/sfpshow index 81add132964..b04bf516311 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -14,7 +14,6 @@ from typing import Dict import click from natsort import natsorted -from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix, recirc_prefix from sonic_py_common import multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string from utilities_common.sfp_helper import ( @@ -305,7 +304,7 @@ class SFPShow(object): return output # Convert sfp info in DB to cli output string - def convert_sfp_info_to_output_string(self, sfp_info_dict): + def convert_sfp_info_to_output_string(self, sfp_info_dict, sfp_firmware_info_dict): indent = ' ' * 8 output = '' is_sfp_cmis = 'cmis_rev' in sfp_info_dict @@ -333,6 +332,11 @@ class SFPShow(object): output += '{}N/A\n'.format((indent * 2)) elif key == 'application_advertisement': output += covert_application_advertisement_to_output_string(indent, sfp_info_dict) + elif key == 'active_firmware' or key == 'inactive_firmware': + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_firmware_info_dict[key] if key in sfp_firmware_info_dict else 'N/A') + elif key.startswith(('e1_', 'e2_')): + if key in sfp_firmware_info_dict: + output += '{}{}: {}\n'.format(indent, data_map[key], sfp_firmware_info_dict[key]) else: output += '{}{}: {}\n'.format(indent, data_map[key], sfp_info_dict[key]) @@ -441,12 +445,13 @@ class SFPShow(object): output = '' sfp_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface_name)) + sfp_firmware_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(interface_name)) if sfp_info_dict: if sfp_info_dict['type'] == RJ45_PORT_TYPE: output = 'SFP EEPROM is not applicable for RJ45 port\n' else: output = 'SFP EEPROM detected\n' - sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict) + sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict, sfp_firmware_info_dict) output += sfp_info_output if dump_dom: @@ -517,8 +522,10 @@ class SFPShow(object): for suffix in ZR_PM_THRESHOLD_KEY_SUFFIXS: key = self.convert_pm_prefix_to_threshold_prefix( prefix) + suffix - thresholds.append( - float(sfp_threshold_dict[key]) if key in sfp_threshold_dict else None) + if key in sfp_threshold_dict and sfp_threshold_dict[key] != 'N/A': + thresholds.append(float(sfp_threshold_dict[key])) + else: + thresholds.append(None) tca_high, tca_low = None, None if values[2] is not None and thresholds[0] is not None: @@ -539,6 +546,10 @@ class SFPShow(object): output = ZR_PM_NOT_APPLICABLE_STR + '\n' return output + def is_valid_physical_port(self, port_name): + role = self.db.get(self.db.APPL_DB, 'PORT_TABLE:{}'.format(port_name), multi_asic.PORT_ROLE) + return multi_asic.is_front_panel_port(port_name, role) + @multi_asic_util.run_on_multi_asic def get_eeprom(self): if self.intf_name is not None: @@ -548,7 +559,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_eeprom[interface] = self.convert_interface_sfp_info_to_cli_output_string( self.db, interface, self.dump_dom) @@ -572,7 +583,7 @@ class SFPShow(object): port_table_keys = 
self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: key = re.split(':', i, maxsplit=1)[-1].strip() - if key and key.startswith(front_panel_prefix()) and not key.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if key and self.is_valid_physical_port(key): presence_string = self.convert_interface_sfp_presence_state_to_cli_output_string(self.db, key) port_table.append((key, presence_string)) @@ -587,7 +598,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_pm[interface] = self.convert_interface_sfp_pm_to_cli_output_string( self.db, interface) @@ -600,7 +611,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + if interface and self.is_valid_physical_port(interface): self.intf_status[interface] = self.convert_interface_sfp_status_to_cli_output_string( self.db, interface) diff --git a/sfputil/main.py b/sfputil/main.py index eddc43f3dc0..ad0b1b3775e 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -24,6 +24,7 @@ from utilities_common.sfp_helper import covert_application_advertisement_to_output_string from utilities_common.sfp_helper import QSFP_DATA_MAP from tabulate import tabulate +from utilities_common.general import load_db_config VERSION = '3.0' @@ -84,8 +85,6 @@ 'encoding': 'Encoding', 'connector': 'Connector', 'application_advertisement': 'Application Advertisement', - 'active_firmware': 'Active Firmware Version', - 'inactive_firmware': 'Inactive Firmware Version', 'hardware_rev': 'Hardware Revision', 'media_interface_code': 'Media Interface Code', 'host_electrical_interface': 'Host Electrical Interface', @@ -563,6 +562,7 @@ def load_sfputilhelper(): def load_port_config(): + load_db_config() try: if multi_asic.is_multi_asic(): # For multi ASIC platforms we pass DIR of port_config_file_path and the number of asics @@ -705,7 +705,7 @@ def eeprom_hexdump(port, page): page = 0 else: page = validate_eeprom_page(page) - return_code, output = eeprom_hexdump_single_port(port, int(str(page), base=16)) + return_code, output = eeprom_hexdump_single_port(port, page) click.echo(output) sys.exit(return_code) else: @@ -1314,9 +1314,12 @@ def update_firmware_info_to_state_db(port_name): state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) if state_db is not None: state_db.connect(state_db.STATE_DB) - active_firmware, inactive_firmware = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "active_firmware", active_firmware) - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() + if transceiver_firmware_info_dict is not None: + active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A') + inactive_firmware = 
transceiver_firmware_info_dict.get('inactive_firmware', 'N/A') + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware) + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) # 'firmware' subgroup @cli.group() diff --git a/show/fabric.py b/show/fabric.py index c8dc956e44a..785e1ab4779 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -13,6 +13,35 @@ def counters(): """Show fabric port counters""" pass +@fabric.group(cls=clicommon.AliasedGroup) +def monitor(): + """Show fabric monitor""" + pass + +@monitor.group(invoke_without_command=True) +@multi_asic_util.multi_asic_click_option_namespace +@click.option('-e', '--errors', is_flag=True) +def capacity(namespace, errors): + """Show fabric capacity""" + cmd = ['fabricstat', '-c'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if errors: + cmd += ['-e'] + clicommon.run_command(cmd) + +@fabric.group(invoke_without_command=True) +@multi_asic_util.multi_asic_click_option_namespace +@click.option('-e', '--errors', is_flag=True) +def isolation(namespace, errors): + """Show fabric isolation status""" + cmd = ['fabricstat', '-i'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if errors: + cmd += ['-e'] + clicommon.run_command(cmd) + @fabric.group(invoke_without_command=True) @multi_asic_util.multi_asic_click_option_namespace @click.option('-e', '--errors', is_flag=True) diff --git a/show/main.py b/show/main.py index 8bc51e20fd4..8e45c028e7e 100755 --- a/show/main.py +++ b/show/main.py @@ -142,6 +142,24 @@ def get_cmd_output(cmd): proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) return proc.communicate()[0], proc.returncode +def get_config_json_by_namespace(namespace): + cmd = ['sonic-cfggen', '-d', '--print-data'] + if namespace is not None and namespace != multi_asic.DEFAULT_NAMESPACE: + cmd += ['-n', namespace] + + stdout, rc = get_cmd_output(cmd) + if rc: + click.echo("Failed to get cmd output '{}':rc {}".format(cmd, rc)) + raise click.Abort() + + try: + config_json = json.loads(stdout) + except JSONDecodeError as e: + click.echo("Failed to load output '{}':{}".format(cmd, e)) + raise click.Abort() + + return config_json + # Lazy global class instance for SONiC interface name to alias conversion iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) @@ -1407,25 +1425,25 @@ def runningconfiguration(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Show full running configuration""" - cmd = ['sonic-cfggen', '-d', '--print-data'] - stdout, rc = get_cmd_output(cmd) - if rc: - click.echo("Failed to get cmd output '{}':rc {}".format(cmd, rc)) - raise click.Abort() + output = {} + bgpraw_cmd = "show running-config" - try: - output = json.loads(stdout) - except JSONDecodeError as e: - click.echo("Failed to load output '{}':{}".format(cmd, e)) - raise click.Abort() + import utilities_common.bgp_util as bgp_util + # In multi-asic, the namespace is changed to 'localhost' by design + host_config = get_config_json_by_namespace(multi_asic.DEFAULT_NAMESPACE) + output['localhost'] = host_config - if not multi_asic.is_multi_asic(): - bgpraw_cmd = [constants.RVTYSH_COMMAND, '-c', 'show running-config'] - bgpraw, rc = get_cmd_output(bgpraw_cmd) - if rc: - bgpraw = "" - output['bgpraw'] = bgpraw - click.echo(json.dumps(output, indent=4)) + if multi_asic.is_multi_asic(): + ns_list = 
multi_asic.get_namespace_list() + for ns in ns_list: + ns_config = get_config_json_by_namespace(ns) + if bgp_util.is_bgp_feature_state_enabled(ns): + ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) + output[ns] = ns_config + click.echo(json.dumps(output, indent=4)) + else: + host_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd) + click.echo(json.dumps(output['localhost'], indent=4)) # 'acl' subcommand ("show runningconfiguration acl") diff --git a/show/syslog.py b/show/syslog.py index d258be3351d..ad4e7b5b854 100644 --- a/show/syslog.py +++ b/show/syslog.py @@ -1,9 +1,11 @@ import click import tabulate from natsort import natsorted import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util +from sonic_py_common import multi_asic from syslog_util import common as syslog_common @@ -83,8 +85,11 @@ def rate_limit_host(db): name='rate-limit-container' ) @click.argument('service_name', metavar='', required=False) +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices() + ['default']), + show_default=True, help='Namespace name or all') @clicommon.pass_db -def rate_limit_container(db, service_name): +def rate_limit_container(db, service_name, namespace): """ Show syslog rate limit configuration for containers """ header = [ @@ -92,16 +97,57 @@ def rate_limit_container(db, service_name): "INTERVAL", "BURST", ] - body = [] + + # Feature configuration in global DB features = db.cfgdb.get_table(syslog_common.FEATURE_TABLE) - + if service_name: + syslog_common.service_validator(features, service_name) + + global_feature_data, per_ns_feature_data = syslog_common.extract_feature_data(features) + if not namespace: + # for all namespaces + is_first = True + for namespace, cfg_db in natsorted(db.cfgdb_clients.items()): + if is_first: + is_first = False + else: + # add a new blank line between each namespace + click.echo('\n') + + if namespace == multi_asic.DEFAULT_NAMESPACE: + if service_name and service_name not in global_feature_data: + continue + echo_rate_limit_config(header, cfg_db, service_name, global_feature_data) + else: + if service_name and service_name not in per_ns_feature_data: + continue + echo_rate_limit_config(header, cfg_db, service_name, per_ns_feature_data, namespace) + elif namespace == 'default': + # for default/global namespace only + echo_rate_limit_config(header, db.cfgdb, service_name, global_feature_data) + else: + # for a specific namespace + echo_rate_limit_config(header, db.cfgdb_clients[namespace], service_name, per_ns_feature_data, namespace) + + +def echo_rate_limit_config(header, db, service_name, features, namespace=None): + """Echo rate limit configuration + + Args: + header (list): CLI headers + db (object): Db object + service_name (str): Optional service name to display. + features (dict): Feature data read from CONFIG DB + namespace (str, optional): Namespace provided by user. Defaults to None.
+ """ + body = [] if service_name: syslog_common.service_validator(features, service_name) service_list = [service_name] else: - service_list = [name for name, service_config in features.items() if service_config.get(syslog_common.SUPPORT_RATE_LIMIT, '').lower() == 'true'] - - syslog_configs = db.cfgdb.get_table(syslog_common.SYSLOG_CONFIG_FEATURE_TABLE) + service_list = features.keys() + + syslog_configs = db.get_table(syslog_common.SYSLOG_CONFIG_FEATURE_TABLE) for service in natsorted(service_list): if service in syslog_configs: entry = syslog_configs[service] @@ -110,5 +157,11 @@ def rate_limit_container(db, service_name): entry.get(syslog_common.SYSLOG_RATE_LIMIT_BURST, 'N/A')]) else: body.append([service, 'N/A', 'N/A']) - - click.echo(format(header, body)) + + if namespace: + click.echo(f'Namespace {namespace}:') + + if body: + click.echo(format(header, body)) + else: + click.echo('N/A') diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index 0490a482163..9e83f8edd7d 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -89,7 +89,7 @@ def set_fips(self, image, enable): cmdline = out.strip() cmdline = re.sub('^linuxargs=', '', cmdline) cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips - run_command(['/usr/bin/fw_setenv', 'linuxargs'] + split(cmdline)) + run_command(['/usr/bin/fw_setenv', 'linuxargs', cmdline]) click.echo('Done') def get_fips(self, image): diff --git a/syslog_util/common.py b/syslog_util/common.py index 5282c088e8f..742e6ae059d 100644 --- a/syslog_util/common.py +++ b/syslog_util/common.py @@ -1,4 +1,5 @@ import click +from sonic_py_common import multi_asic FEATURE_TABLE = "FEATURE" @@ -9,6 +10,8 @@ SYSLOG_RATE_LIMIT_INTERVAL = 'rate_limit_interval' SYSLOG_RATE_LIMIT_BURST = 'rate_limit_burst' SUPPORT_RATE_LIMIT = 'support_syslog_rate_limit' +FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope' +FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope' def rate_limit_validator(interval, burst): @@ -70,7 +73,33 @@ def save_rate_limit_to_db(db, service_name, interval, burst, log): data[SYSLOG_RATE_LIMIT_INTERVAL] = interval if burst is not None: data[SYSLOG_RATE_LIMIT_BURST] = burst - db.cfgdb.mod_entry(table, key, data) + db.mod_entry(table, key, data) log.log_notice(f"Configured syslog {service_name} rate-limits: interval={data.get(SYSLOG_RATE_LIMIT_INTERVAL, 'N/A')},\ burst={data.get(SYSLOG_RATE_LIMIT_BURST, 'N/A')}") + +def extract_feature_data(features): + """Extract feature data in global scope and feature data in per ASIC namespace scope + + Args: + features (dict): Feature data got from CONFIG DB + + Returns: + tuple: + """ + global_feature_data = {} + per_ns_feature_data = {} + is_multi_asic = multi_asic.is_multi_asic() + for feature_name, feature_config in features.items(): + if not feature_config.get(SUPPORT_RATE_LIMIT, '').lower() == 'true': + continue + + if is_multi_asic: + if feature_config.get(FEATURE_HAS_GLOBAL_SCOPE, '').lower() == 'true': + global_feature_data[feature_name] = feature_config + + if feature_config.get(FEATURE_HAS_PER_ASIC_SCOPE, '').lower() == 'true': + per_ns_feature_data[feature_name] = feature_config + else: + global_feature_data[feature_name] = feature_config + return global_feature_data, per_ns_feature_data diff --git a/tests/acl_loader_test.py b/tests/acl_loader_test.py index 599e47461ad..01dc8602d51 100644 --- a/tests/acl_loader_test.py +++ b/tests/acl_loader_test.py @@ -56,6 +56,36 @@ def test_validate_mirror_action(self, acl_loader): assert 
acl_loader.validate_actions("DATAACL", forward_packet_action) assert not acl_loader.validate_actions("DATAACL", drop_packet_action) + def test_load_rules_when_capability_table_is_empty(self, acl_loader): + """ + Test case to verify that acl_loader can still load dataplane acl rules when skip_action_validation + is true, and capability table in state_db is absent + """ + # Backup and empty the capability table from state_db + SWITCH_CAPABILITY = "SWITCH_CAPABILITY|switch" + if acl_loader.per_npu_statedb: + statedb = list(acl_loader.per_npu_statedb.values())[0] + else: + statedb = acl_loader.statedb + switchcapability = statedb.get_all("STATE_DB", SWITCH_CAPABILITY) + statedb.delete("STATE_DB", SWITCH_CAPABILITY) + try: + acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json'), skip_action_validation=True) + assert acl_loader.rules_info[("DATAACL", "RULE_2")] + assert acl_loader.rules_info[("DATAACL", "RULE_2")] == { + "VLAN_ID": 369, + "ETHER_TYPE": "2048", + "IP_PROTOCOL": 6, + "SRC_IP": "20.0.0.2/32", + "DST_IP": "30.0.0.3/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9998" + } + finally: + # Restore the capability table in state_db + for key, value in switchcapability.items(): + statedb.set("STATE_DB", SWITCH_CAPABILITY, key, value) + def test_vlan_id_translation(self, acl_loader): acl_loader.rules_info = {} acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json')) diff --git a/tests/config_fabric_test.py b/tests/config_fabric_test.py index 1f56ea416ac..230615cbaa8 100644 --- a/tests/config_fabric_test.py +++ b/tests/config_fabric_test.py @@ -4,6 +4,7 @@ import os import pytest import sys +import importlib from click.testing import CliRunner from utilities_common.db import Db @@ -41,22 +42,35 @@ def test_config_isolation(self, ctx): expect_result = 0 assert operator.eq(result.exit_code, expect_result) - # Issue command "config fabric port isolate 1", - # check if the result has the error message as port 1 is not in use. - result = self.basic_check("port", ["isolate", "1"], ctx) - assert "Port 1 is not in use" in result.output - # Issue command "config fabric port unisolate 0", # check if the result is expected. result = self.basic_check("port", ["unisolate", "0"], ctx) expect_result = 0 assert operator.eq(result.exit_code, expect_result) + # Issue command "config fabric port unisolate 0", + # check if the result is expected. + result = self.basic_check("port", ["unisolate", "0", "--force"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + assert "Force unisolate the link" in result.output + + # Issue command "config fabric port isolate 1", + # check if the result has the error message as port 1 is not in use. + result = self.basic_check("port", ["isolate", "1"], ctx) + assert "Port 1 is not in use" in result.output + # Issue command "config fabric port unisolate 1", # check if the result has the error message as port 1 is not in use. result = self.basic_check("port", ["unisolate", "1"], ctx) assert "Port 1 is not in use" in result.output + # Issue command "config fabric port unisolate all -n asic1" + # check if the result has the warning message + result = self.basic_check("port", ["unisolate", "all", "--force"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + def test_config_fabric_monitor_threshold(self, ctx): # Issue command "config fabric port monitor error threshold <#> <#>" # with an out of range number, check if the result has the error message. 
@@ -93,3 +107,84 @@ def test_config_fabric_monitor_threshold(self, ctx): result = self.basic_check("port", ["monitor", "poll", "threshold", "recovery", "8"], ctx) expect_result = 0 assert operator.eq(result.exit_code, expect_result) + + def test_config_fabric_monitor_state(self, ctx): + # Issue command "config fabric port monitor state " + result = self.basic_check("port", ["monitor", "state", "enable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + result = self.basic_check("port", ["monitor", "state", "disable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + def test_config_capacity(self, ctx): + # Issue command "config fabric monitor capacity threshold 90", + # check if the result is expected. + result = self.basic_check("monitor", ["capacity", "threshold", "90"], ctx) + expect_result=0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric monitor capacity threshold 3", + # check if the result has the warning message. + result = self.basic_check("monitor", ["capacity", "threshold", "3"], ctx) + assert "threshold must be in range 5...250" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + +class TestMultiAsicConfigFabric(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def basic_check(self, command_name, para_list, ctx): + # This function issues command of "config fabric xxxx", + # and returns the result of the command. + runner = CliRunner() + result = runner.invoke(config.config.commands["fabric"].commands[command_name], para_list, obj = ctx) + print(result.output) + return result + + def test_multi_config_fabric_monitor_state(self, ctx): + result = self.basic_check("port", ["monitor", "state", "disable"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + def test_config_capacity_multi(self, ctx): + # Issue command "config fabric monitor capacity threshold 80", + # check if the result is expected. + result = self.basic_check("monitor", ["capacity", "threshold", "80"], ctx) + expect_result=0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric monitor capacity threshold 4", + # check if the result has the warning message. 
+ result = self.basic_check("monitor", ["capacity", "threshold", "4"], ctx) + assert "threshold must be in range 5...250" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN_TEST") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() diff --git a/tests/config_mlnx_test.py b/tests/config_mlnx_test.py new file mode 100644 index 00000000000..0cf2e117b40 --- /dev/null +++ b/tests/config_mlnx_test.py @@ -0,0 +1,47 @@ +import sys +import click +import pytest +import config.plugins.mlnx as config +from unittest.mock import patch, Mock +from click.testing import CliRunner +from utilities_common.db import Db + + +@patch('config.plugins.mlnx.sniffer_env_variable_set', Mock(return_value=False)) +@patch('config.plugins.mlnx.sniffer_filename_generate', Mock(return_value="sdk_file_name")) +class TestConfigMlnx(object): + def setup(self): + print('SETUP') + + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) + def test_config_sniffer_enable(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) + assert "SDK sniffer is Enabled, recording file is sdk_file_name." in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) + def test_config_sniffer_disble(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) + assert "SDK sniffer is Disabled." in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) + def test_config_sniffer_enable_fail(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) + assert "SDK sniffer is Enabled, recording file is sdk_file_name." not in result.output + + @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) + def test_config_sniffer_disble_fail(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) + assert "SDK sniffer is Disabled." 
not in result.output + + def teardown(self): + print('TEARDOWN') + diff --git a/tests/config_override_input/aaa_yang_hard_check.json b/tests/config_override_input/aaa_yang_hard_check.json new file mode 100644 index 00000000000..61794f1ece8 --- /dev/null +++ b/tests/config_override_input/aaa_yang_hard_check.json @@ -0,0 +1,28 @@ +{ + "running_config": { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + }, + "golden_config": { + }, + "expected_config": { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + } +} diff --git a/tests/config_override_input/multi_asic_dm_rm.json b/tests/config_override_input/multi_asic_dm_rm.json deleted file mode 100644 index a4c0dd5fa7c..00000000000 --- a/tests/config_override_input/multi_asic_dm_rm.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "localhost": { - "DEVICE_METADATA": {} - }, - "asic0": { - "DEVICE_METADATA": {} - }, - "asic1": { - "DEVICE_METADATA": {} - } -} diff --git a/tests/config_override_input/multi_asic_feature_rm.json b/tests/config_override_input/multi_asic_feature_rm.json new file mode 100644 index 00000000000..b29cdf952f7 --- /dev/null +++ b/tests/config_override_input/multi_asic_feature_rm.json @@ -0,0 +1,11 @@ +{ + "localhost": { + "FEATURE": {} + }, + "asic0": { + "FEATURE": {} + }, + "asic1": { + "FEATURE": {} + } +} diff --git a/tests/config_override_input/multi_asic_macsec_ov.json b/tests/config_override_input/multi_asic_macsec_ov.json index ba86f6ef606..9a4a5c478ce 100644 --- a/tests/config_override_input/multi_asic_macsec_ov.json +++ b/tests/config_override_input/multi_asic_macsec_ov.json @@ -2,21 +2,30 @@ "localhost": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } }, "asic0": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } }, "asic1": { "MACSEC_PROFILE": { "profile": { - "key": "value" + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" } } } diff --git a/tests/config_override_input/multi_asic_missing_asic.json b/tests/config_override_input/multi_asic_missing_asic.json index db8ba8ec80a..a1eeb27f262 100644 --- a/tests/config_override_input/multi_asic_missing_asic.json +++ b/tests/config_override_input/multi_asic_missing_asic.json @@ -1,8 +1,8 @@ { "localhost": { - "DEVICE_METADATA": {} + "FEATURE": {} }, "asic0": { - "DEVICE_METADATA": {} + "FEATURE": {} } } diff --git a/tests/config_override_test.py b/tests/config_override_test.py index 19d2ddc197c..a46be5ef603 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -18,11 +18,12 @@ FULL_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "full_config_override.json") PORT_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "port_config_override.json") 
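The override-config tests that follow never read golden configs from disk; they patch `config.main.read_json_file` so each test can feed an arbitrary fixture. The recurring pattern, condensed (`run_with_golden` is a hypothetical wrapper around the exact calls the tests below make):

```python
# Condensed sketch of the read_json_file stubbing used by the override tests
# below; run_with_golden is a hypothetical helper name.
from unittest import mock
from click.testing import CliRunner

def run_with_golden(config, db, golden_config):
    def read_json_file_side_effect(filename):
        return golden_config                   # ignore the filename entirely
    with mock.patch('config.main.read_json_file',
                    mock.MagicMock(side_effect=read_json_file_side_effect)):
        runner = CliRunner()
        return runner.invoke(config.config.commands["override-config-table"],
                             ['golden_config_db.json'], obj=db)
```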
EMPTY_TABLE_REMOVAL = os.path.join(DATA_DIR, "empty_table_removal.json") +AAA_YANG_HARD_CHECK = os.path.join(DATA_DIR, "aaa_yang_hard_check.json") RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") -MULTI_ASIC_DEVICE_METADATA_RM = os.path.join(DATA_DIR, "multi_asic_dm_rm.json") +MULTI_ASIC_FEATURE_RM = os.path.join(DATA_DIR, "multi_asic_feature_rm.json") MULTI_ASIC_DEVICE_METADATA_GEN_SYSINFO = os.path.join(DATA_DIR, "multi_asic_dm_gen_sysinfo.json") MULTI_ASIC_MISSING_LOCALHOST_OV = os.path.join(DATA_DIR, "multi_asic_missing_localhost.json") MULTI_ASIC_MISSING_ASIC_OV = os.path.join(DATA_DIR, "multi_asic_missing_asic.json") @@ -104,7 +105,9 @@ def read_json_file_side_effect(filename): ['golden_config_db.json', '--dry-run']) assert result.exit_code == 0 - assert json.loads(result.output) == current_config + start_pos = result.output.find('{') + json_text = result.output[start_pos:] + assert json.loads(json_text) == current_config def test_golden_config_db_empty(self): db = Db() @@ -159,6 +162,25 @@ def test_golden_config_db_empty_table_removal(self): db, config, read_data['running_config'], read_data['golden_config'], read_data['expected_config']) + def test_aaa_yang_hard_dependency_check_failure(self): + """YANG hard dependency must be satisfied""" + db = Db() + with open(AAA_YANG_HARD_CHECK, "r") as f: + read_data = json.load(f) + def read_json_file_side_effect(filename): + return read_data['golden_config'] + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + write_init_config_db(db.cfgdb, read_data['running_config']) + + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exits."
in result.output + def check_override_config_table(self, db, config, running_config, golden_config, expected_config): def read_json_file_side_effect(filename): @@ -288,7 +310,15 @@ def read_json_file_side_effect(filename): # The profile_content was copied from MULTI_ASIC_MACSEC_OV, where all # ns sharing the same content: {"profile": {"key": "value"}} - profile_content = {"profile": {"key": "value"}} + profile_content = { + "profile": { + "primary_cak": "1159485744465e5a537272050a1011073557475152020c0e040c57223a357d7d71", + "primary_ckn": "6162636465666768696A6B6C6D6E6F70", + "fallback_cak": "000000000000000000000000000000000000000000000000000000000000000000", + "fallback_ckn": "11111111111111111111111111111111" + + } + } with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): @@ -300,16 +330,16 @@ def read_json_file_side_effect(filename): for ns, config_db in cfgdb_clients.items(): assert config_db.get_config()['MACSEC_PROFILE'] == profile_content - def test_device_metadata_table_rm(self): + def test_feature_table_rm(self): def read_json_file_side_effect(filename): - with open(MULTI_ASIC_DEVICE_METADATA_RM, "r") as f: - device_metadata = json.load(f) - return device_metadata + with open(MULTI_ASIC_FEATURE_RM, "r") as f: + feature = json.load(f) + return feature db = Db() cfgdb_clients = db.cfgdb_clients for ns, config_db in cfgdb_clients.items(): - assert 'DEVICE_METADATA' in config_db.get_config() + assert 'FEATURE' in config_db.get_config() with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): @@ -319,7 +349,7 @@ def read_json_file_side_effect(filename): assert result.exit_code == 0 for ns, config_db in cfgdb_clients.items(): - assert 'DEVICE_METADATA' not in config_db.get_config() + assert 'FEATURE' not in config_db.get_config() def test_device_metadata_keep_sysinfo(self): def read_json_file_side_effect(filename): diff --git a/tests/config_test.py b/tests/config_test.py index cc0ac22e986..1054a52a33f 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -15,7 +15,7 @@ import click from click.testing import CliRunner -from sonic_py_common import device_info +from sonic_py_common import device_info, multi_asic from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -1699,7 +1699,7 @@ def test_config_load_mgmt_config_ipv6_only(self, get_cmd_module, setup_single_br } } self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv6_only_output, 7) - + def test_config_load_mgmt_config_ipv4_ipv6(self, get_cmd_module, setup_single_broadcom_asic): device_desc_result = { 'DEVICE_METADATA': { @@ -1931,19 +1931,19 @@ def test_warm_restart_neighsyncd_timer_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. 
Error" in result.output - + def test_warm_restart_neighsyncd_timer(self): config.ADHOC_VALIDATION = True runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["warm_restart"].commands["neighsyncd_timer"], ["0"], obj=obj) print(result.exit_code) print(result.output) assert result.exit_code != 0 assert "neighsyncd warm restart timer must be in range 1-9999" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_bgp_timer_yang_validation(self): @@ -1957,7 +1957,7 @@ def test_warm_restart_bgp_timer_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. Error" in result.output - + def test_warm_restart_bgp_timer(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -1969,7 +1969,7 @@ def test_warm_restart_bgp_timer(self): print(result.output) assert result.exit_code != 0 assert "bgp warm restart timer must be in range 1-3600" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_teamsyncd_timer_yang_validation(self): @@ -1995,7 +1995,7 @@ def test_warm_restart_teamsyncd_timer(self): print(result.output) assert result.exit_code != 0 assert "teamsyncd warm restart timer must be in range 1-3600" in result.output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_mod_entry", mock.Mock(side_effect=ValueError)) def test_warm_restart_bgp_eoiu_yang_validation(self): @@ -2052,7 +2052,7 @@ def test_add_cablelength_invalid_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Invalid ConfigDB. 
Error" in result.output - + @patch("config.main.ConfigDBConnector.get_entry", mock.Mock(return_value="Port Info")) @patch("config.main.is_dynamic_buffer_enabled", mock.Mock(return_value=True)) def test_add_cablelength_with_invalid_name_invalid_length(self): @@ -2078,7 +2078,7 @@ def setup_class(cls): print("SETUP") import config.main importlib.reload(config.main) - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) def test_add_loopback_with_invalid_name_yang_validation(self): @@ -2116,7 +2116,7 @@ def test_del_nonexistent_loopback_adhoc_validation(self): print(result.output) assert result.exit_code != 0 assert "Loopback12 does not exist" in result.output - + def test_del_nonexistent_loopback_adhoc_validation(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -2128,7 +2128,7 @@ def test_del_nonexistent_loopback_adhoc_validation(self): print(result.output) assert result.exit_code != 0 assert "Loopbax1 is invalid, name should have prefix 'Loopback' and suffix '<0-999>'" in result.output - + @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(return_value=True)) @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) def test_add_loopback_yang_validation(self): @@ -2152,7 +2152,7 @@ def test_add_loopback_adhoc_validation(self): print(result.exit_code) print(result.output) assert result.exit_code == 0 - + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -2635,3 +2635,110 @@ def test_date_bad(self): @classmethod def teardown_class(cls): print('TEARDOWN') + + +class TestApplyPatchMultiAsic(unittest.TestCase): + def setUp(self): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + self.runner = CliRunner() + self.patch_file_path = 'path/to/patch.json' + self.patch_content = [ + { + "op": "add", + "path": "/localhost/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "add", + "path": "/asic0/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet3", "Ethernet4"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "replace", + "path": "/asic1/PORT/Ethernet1/mtu", + "value": "9200" + } + ] + + def test_apply_patch_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path], catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + 
self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_database_config() \ No newline at end of file diff --git a/tests/config_xcvr_test.py b/tests/config_xcvr_test.py index 5043aa89d5c..6e05996db10 100644 --- a/tests/config_xcvr_test.py +++ b/tests/config_xcvr_test.py @@ -1,3 +1,4 @@ +from unittest.mock import patch import click import config.main as config import operator @@ -47,6 +48,25 @@ def test_config_tx_power(self, ctx): result = self.basic_check("tx_power", ["PortChannel0001", "11.3"], ctx, operator.ne) assert 'Invalid port PortChannel0001' in result.output + @patch("config.main.ConfigDBConnector.get_entry") + def test_dom(self, mock_get_entry, ctx): + interface_name = 'Ethernet0' + desired_config = 'enable' + + result = self.basic_check("dom", ["", desired_config], ctx, operator.ne) + assert "Interface name is invalid. Please enter a valid interface name!!" 
in result.output + + mock_get_entry.return_value = None + result = self.basic_check("dom", [interface_name, desired_config], ctx, operator.ne) + assert "Interface {} does not exist".format(interface_name) in result.output + + mock_get_entry.return_value = {'subport': '2'} + result = self.basic_check("dom", [interface_name, desired_config], ctx, operator.ne) + assert "DOM monitoring config only supported for subports {}".format(config.DOM_CONFIG_SUPPORTED_SUBPORTS) in result.output + + mock_get_entry.return_value = {'subport': '1'} + result = self.basic_check("dom", [interface_name, desired_config], ctx) + def basic_check(self, command_name, para_list, ctx, op=operator.eq, expect_result=0): runner = CliRunner() result = runner.invoke(config.config.commands["interface"].commands["transceiver"].commands[command_name], para_list, obj = ctx) diff --git a/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json new file mode 100644 index 00000000000..1502ad0b294 --- /dev/null +++ b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-expected.json @@ -0,0 +1,11 @@ +{ + "VERSIONS|DATABASE": { + "VERSION": "version_3_0_3" + }, + "DEVICE_METADATA|localhost": { + "synchronous_mode": "enable", + "docker_routing_config_mode": "separated", + "platform": "x86_64-nvidia_sn5600-r0", + "hwsku": "Mellanox-SN5600-O128" + } +} diff --git a/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json new file mode 100644 index 00000000000..8f2b67da080 --- /dev/null +++ b/tests/db_migrator_input/config_db/empty-config-with-device-info-nvidia-input.json @@ -0,0 +1,6 @@ +{ + "DEVICE_METADATA|localhost": { + "platform": "x86_64-nvidia_sn5600-r0", + "hwsku": "Mellanox-SN5600-O128" + } +} diff --git a/tests/db_migrator_input/config_db/per_command_aaa_disable.json b/tests/db_migrator_input/config_db/per_command_aaa_disable.json new file mode 100644 index 00000000000..215e3d7fe32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable.json @@ -0,0 +1,6 @@ +{ + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json new file mode 100644 index 00000000000..215e3d7fe32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable_expected.json @@ -0,0 +1,6 @@ +{ + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json new file mode 100644 index 00000000000..abc38879b6d --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_disable_golden.json @@ -0,0 +1,8 @@ +{ + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable.json b/tests/db_migrator_input/config_db/per_command_aaa_enable.json new file mode 100644 index 00000000000..0026e038504 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable.json @@ -0,0 +1,9 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + }, + 
"TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json new file mode 100644 index 00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_enable_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json new file mode 100644 index 00000000000..694d2f5cb32 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication.json @@ -0,0 +1,9 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json new file mode 100644 index 00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_authentication_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_change.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change.json new file mode 100644 index 00000000000..518e1af6dbf --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change.json @@ -0,0 +1,15 @@ +{ + "AAA|accounting": { + "login": "local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "AAA|authorization": { + "login": "local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git 
a/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json new file mode 100644 index 00000000000..518e1af6dbf --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change_expected.json @@ -0,0 +1,15 @@ +{ + "AAA|accounting": { + "login": "local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "AAA|authorization": { + "login": "local" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": "testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_change_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json new file mode 100644 index 00000000000..6ec39507a19 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey.json @@ -0,0 +1,8 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json new file mode 100644 index 00000000000..690620e52f3 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_expected.json @@ -0,0 +1,11 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json new file mode 100644 index 00000000000..b06af48439b --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_passkey_golden.json @@ -0,0 +1,15 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json new file mode 100644 index 00000000000..c45e0745ed5 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus.json @@ -0,0 +1,5 @@ +{ + "AAA|authentication": { + "login": "tacacs+" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json new file mode 100644 index 00000000000..d39c98b7a54 --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_expected.json @@ -0,0 +1,12 @@ +{ + "AAA|accounting": { + "login": "tacacs+,local" + }, + "AAA|authentication": { + "login": "tacacs+" + }, + "TACPLUS|global": { + "auth_type": "login", + "passkey": 
"testpasskey" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json new file mode 100644 index 00000000000..005a2fd398e --- /dev/null +++ b/tests/db_migrator_input/config_db/per_command_aaa_no_tacplus_golden.json @@ -0,0 +1,19 @@ +{ + "AAA": { + "accounting": { + "login": "tacacs+,local" + }, + "authentication": { + "login": "tacacs+" + }, + "authorization": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "auth_type": "login", + "passkey": "testpasskey" + } + } +} \ No newline at end of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 21ca9148df6..e21539766a1 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -4,8 +4,9 @@ import argparse from unittest import mock from deepdiff import DeepDiff +import json -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector, SonicDBConfig from sonic_py_common import device_info from .mock_tables import dbconnector @@ -156,6 +157,7 @@ def check_appl_db(self, result, expected): ['empty-config', 'empty-config-with-device-info-generic', 'empty-config-with-device-info-traditional', + 'empty-config-with-device-info-nvidia', 'non-default-config', 'non-default-xoff', 'non-default-lossless-profile-in-pg', @@ -889,6 +891,22 @@ def test_init(self, mock_args): import db_migrator db_migrator.main() + @mock.patch('argparse.ArgumentParser.parse_args') + @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) + @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) + def test_init_no_namespace(self, mock_args): + mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + import db_migrator + db_migrator.main() + + @mock.patch('argparse.ArgumentParser.parse_args') + @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) + @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) + def test_init_namespace(self, mock_args): + mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + import db_migrator + db_migrator.main() + class TestGNMIMigrator(object): @classmethod @@ -944,3 +962,51 @@ def test_dns_nameserver_migrator_configdb(self): diff = DeepDiff(resulting_table, expected_table, ignore_order=True) assert not diff + +class TestAAAMigrator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def load_golden_config(self, dbmgtr, test_json): + dbmgtr.config_src_data = {} + + json_path = os.path.join(mock_db_path, 'config_db', test_json + ".json") + if os.path.exists(json_path): + with open(json_path) as f: + dbmgtr.config_src_data = json.load(f) + print("test_per_command_aaa load golden config success, config_src_data: {}".format(dbmgtr.config_src_data)) + else: + print("test_per_command_aaa load golden config failed, file {} does not exist.".format(test_json)) + + + @pytest.mark.parametrize('test_json', ['per_command_aaa_enable', + 'per_command_aaa_no_passkey', + 'per_command_aaa_disable', + 'per_command_aaa_no_change', + 'per_command_aaa_no_tacplus', + 'per_command_aaa_no_authentication']) + def 
test_per_command_aaa(self, test_json): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', test_json) + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + self.load_golden_config(dbmgtr, test_json + '_golden') + dbmgtr.migrate_tacplus() + dbmgtr.migrate_aaa() + resulting_table = dbmgtr.configDB.get_table("AAA") + + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', test_json + '_expected') + expected_db = Db() + expected_table = expected_db.cfgdb.get_table("AAA") + + print("test_per_command_aaa: {}".format(test_json)) + print("test_per_command_aaa, resulting_table: {}".format(resulting_table)) + print("test_per_command_aaa, expected_table: {}".format(expected_table)) + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff diff --git a/tests/dualtor_neighbor_check_test.py b/tests/dualtor_neighbor_check_test.py index 5916a183a03..fb9475129cc 100644 --- a/tests/dualtor_neighbor_check_test.py +++ b/tests/dualtor_neighbor_check_test.py @@ -235,7 +235,7 @@ def test_read_from_db(self, mock_log_functions): assert asic_route_table == result[4] assert asic_neigh_table == result[5] - def test_read_from_db_with_lua_cache(self, mock_log_functions): + def test_read_from_db_script_not_existed(self, mock_log_functions): with patch("dualtor_neighbor_check.run_command") as mock_run_command: neighbors = {"192.168.0.2": "ee:86:d8:46:7d:01"} mux_states = {"Ethernet4": "active"} @@ -243,23 +243,73 @@ def test_read_from_db_with_lua_cache(self, mock_log_functions): asic_fdb = {"ee:86:d8:46:7d:01": "oid:0x3a00000000064b"} asic_route_table = [] asic_neigh_table = ["{\"ip\":\"192.168.0.23\",\"rif\":\"oid:0x6000000000671\",\"switch_id\":\"oid:0x21000000000000\"}"] - mock_run_command.return_value = json.dumps( - { - "neighbors": neighbors, - "mux_states": mux_states, - "hw_mux_states": hw_mux_states, - "asic_fdb": asic_fdb, - "asic_route_table": asic_route_table, - "asic_neigh_table": asic_neigh_table - } + mock_run_command.side_effect = [ + "(integer) 0", + "c53fd5eaad68be1e66a2fe80cd20a9cb18c91259", + json.dumps( + { + "neighbors": neighbors, + "mux_states": mux_states, + "hw_mux_states": hw_mux_states, + "asic_fdb": asic_fdb, + "asic_route_table": asic_route_table, + "asic_neigh_table": asic_neigh_table + } + ) + ] + mock_appl_db = MagicMock() + mock_appl_db.get = MagicMock(return_value="c53fd5eaad68be1e66a2fe80cd20a9cb18c91259") + + result = dualtor_neighbor_check.read_tables_from_db(mock_appl_db) + + mock_appl_db.get.assert_called_once_with("_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1") + mock_run_command.assert_has_calls( + [ + call("sudo redis-cli SCRIPT EXISTS c53fd5eaad68be1e66a2fe80cd20a9cb18c91259"), + call("sudo redis-cli SCRIPT LOAD \"%s\"" % dualtor_neighbor_check.DB_READ_SCRIPT), + call("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + ] ) + assert neighbors == result[0] + assert mux_states == result[1] + assert hw_mux_states == result[2] + assert {k: v.lstrip("oid:0x") for k, v in asic_fdb.items()} == result[3] + assert asic_route_table == result[4] + assert asic_neigh_table == result[5] + + def test_read_from_db_with_lua_cache(self, mock_log_functions): + with patch("dualtor_neighbor_check.run_command") as mock_run_command: + neighbors = {"192.168.0.2": "ee:86:d8:46:7d:01"} + mux_states = {"Ethernet4": "active"} + hw_mux_states = {"Ethernet4": "active"} + asic_fdb = {"ee:86:d8:46:7d:01": "oid:0x3a00000000064b"} + asic_route_table = [] + asic_neigh_table = 
["{\"ip\":\"192.168.0.23\",\"rif\":\"oid:0x6000000000671\",\"switch_id\":\"oid:0x21000000000000\"}"] + mock_run_command.side_effect = [ + "(integer) 1", + json.dumps( + { + "neighbors": neighbors, + "mux_states": mux_states, + "hw_mux_states": hw_mux_states, + "asic_fdb": asic_fdb, + "asic_route_table": asic_route_table, + "asic_neigh_table": asic_neigh_table + } + ) + ] mock_appl_db = MagicMock() mock_appl_db.get = MagicMock(return_value="c53fd5eaad68be1e66a2fe80cd20a9cb18c91259") result = dualtor_neighbor_check.read_tables_from_db(mock_appl_db) mock_appl_db.get.assert_called_once_with("_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1") - mock_run_command.assert_called_once_with("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + mock_run_command.assert_has_calls( + [ + call("sudo redis-cli SCRIPT EXISTS c53fd5eaad68be1e66a2fe80cd20a9cb18c91259"), + call("sudo redis-cli EVALSHA c53fd5eaad68be1e66a2fe80cd20a9cb18c91259 0") + ] + ) assert neighbors == result[0] assert mux_states == result[1] assert hw_mux_states == result[2] diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index 625c1d14a0d..cc4c049806e 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -151,6 +151,55 @@ 7 0 93 up """ +multi_asic_fabric_capacity = """\ +Monitored fabric capacity threshold: 100% + + ASIC Operating Total # % Last Event Last Time + Links of Links +------ ----------- ---------- ---- ------------ ----------- + asic0 5 8 62.5 None Never + asic1 2 8 25 None Never +""" + +multi_asic_fabric_capacity_asic0 = """\ +Monitored fabric capacity threshold: 100% + + ASIC Operating Total # % Last Event Last Time + Links of Links +------ ----------- ---------- ---- ------------ ----------- + asic0 5 8 62.5 None Never +""" + +multi_asic_fabric_isolation = """\ + +asic0 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 2 0 0 0 + 4 0 0 0 + 6 0 0 0 + 7 0 0 0 + +asic1 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 4 0 0 0 +""" + +multi_asic_fabric_isolation_asic0 = """\ + +asic0 + Local Link Auto Isolated Manual Isolated Isolated +------------ --------------- ----------------- ---------- + 0 0 0 0 + 2 0 0 0 + 4 0 0 0 + 6 0 0 0 + 7 0 0 0 +""" + class TestFabricStat(object): @classmethod def setup_class(cls): @@ -271,6 +320,34 @@ def test_multi_show_fabric_reachability_asic(self): assert return_code == 0 assert result == multi_asic_fabric_reachability_asic0 + def test_mutli_show_fabric_capacity(self): + return_code, result = get_result_and_return_code(['fabricstat', '-c']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_capacity + + def test_multi_show_fabric_capacity_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-c', '-n', 'asic0']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_capacity_asic0 + + def test_multi_show_fabric_isolation(self): + return_code, result = get_result_and_return_code(['fabricstat', '-i']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_isolation + + def test_multi_show_fabric_isolation_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-i', '-n', 'asic0']) + 
print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_isolation_asic0 + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index afe166b008f..4c9b33c3a4d 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -74,16 +74,28 @@ def debug_print(msg): # Mimics os.system call for sonic-cfggen -d --print-data > filename -# -def os_system_cfggen(cmd): +def subprocess_Popen_cfggen(cmd, *args, **kwargs): global running_config - fname = cmd.split(">")[-1].strip() + # Extract file name from kwargs if 'stdout' is a file object + stdout = kwargs.get('stdout') + if hasattr(stdout, 'name'): + fname = stdout.name + else: + raise ValueError("stdout is not a file") + + # Write the running configuration to the file specified in stdout with open(fname, "w") as s: - s.write(json.dumps(running_config, indent=4)) - debug_print("File created {} type={} cfg={}".format(fname, - type(running_config), json.dumps(running_config)[1:40])) - return 0 + json.dump(running_config, s, indent=4) + + class MockPopen: + def __init__(self): + self.returncode = 0 # Simulate successful command execution + + def communicate(self): + return "", "" # Simulate empty stdout and stderr + + return MockPopen() # mimics config_db.set_entry @@ -213,14 +225,14 @@ def vlan_validate(old_cfg, new_cfg, keys): class TestChangeApplier(unittest.TestCase): - @patch("generic_config_updater.change_applier.os.system") + @patch("generic_config_updater.change_applier.subprocess.Popen") @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") - def test_change_apply(self, mock_set, mock_db, mock_os_sys): + def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): global read_data, running_config, json_changes, json_change_index global start_running_config - mock_os_sys.side_effect = os_system_cfggen + mock_subprocess_Popen.side_effect = subprocess_Popen_cfggen mock_db.return_value = DB_HANDLE mock_set.side_effect = set_entry diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py new file mode 100644 index 00000000000..e8b277618f1 --- /dev/null +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -0,0 +1,172 @@ +import unittest +from importlib import reload +from unittest.mock import patch, MagicMock +from generic_config_updater.generic_updater import extract_scope +import generic_config_updater.change_applier +import generic_config_updater.services_validator +import generic_config_updater.gu_common + + +class TestMultiAsicChangeApplier(unittest.TestCase): + + def test_extract_scope(self): + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", 
"/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), + "/sometable/data": (True, "", "/sometable/data"), + "": (False, "", ""), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic77": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception as e: + assert(result == False) + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to return some running configuration + mock_get_running_config.return_value = { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + # Instantiate ChangeApplier with the default namespace + applier = generic_config_updater.change_applier.ChangeApplier() + + # Prepare a change object or data that applier.apply would use + change = MagicMock() + + # Call the apply method with the change object + applier.apply(change) + + # Assert ConfigDBConnector called with the correct namespace + mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="") + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for json.load to return some running configuration + mock_get_running_config.return_value = { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + # Instantiate ChangeApplier with the default namespace + applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + + # Prepare a change object or data that 
applier.apply would use + change = MagicMock() + + # Call the apply method with the change object + applier.apply(change) + + # Assert ConfigDBConnector called with the correct namespace + mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for _get_running_config to raise while reading the running configuration + mock_get_running_config.side_effect = Exception("Failed to get running config") + # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment + namespace = "asic0" + applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace) + + # Prepare a change object or data that applier.apply would use + change = MagicMock() + + # Test the behavior when reading the running config fails + with self.assertRaises(Exception) as context: + applier.apply(change) + + self.assertTrue('Failed to get running config' in str(context.exception)) + + @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) + def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, mock_get_running_config): + # Setup mock for ConfigDBConnector + mock_db = MagicMock() + mock_ConfigDBConnector.return_value = mock_db + + # Setup mock for _get_running_config to simulate configuration where crucial tables are unexpectedly empty + mock_get_running_config.return_value = { + "tables": { + # Simulate empty tables or missing crucial configuration + }, + "services": { + # Normally, services would be listed here + } + } + + # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment + applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + + # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables + change = MagicMock() + + # Apply the patch + try: + assert(applier.apply(change) != 0) + except Exception: + pass diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py new file mode 100644 index 00000000000..4a55eb98be3 --- /dev/null +++ b/tests/generic_config_updater/multiasic_generic_updater_test.py @@ -0,0 +1,167 @@ +import json +import jsonpatch +import unittest +from unittest.mock import patch, MagicMock + +import generic_config_updater.change_applier +import generic_config_updater.generic_updater +import generic_config_updater.services_validator +import generic_config_updater.gu_common + + +class TestMultiAsicPatchApplier(unittest.TestCase): + + @patch('generic_config_updater.gu_common.ConfigWrapper.get_empty_tables', return_value=[]) + @patch('generic_config_updater.gu_common.ConfigWrapper.get_config_db_as_json') + @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch') + @patch('generic_config_updater.generic_updater.ChangeApplier') + def test_apply_patch_specific_namespace(self, mock_ChangeApplier, 
mock_simulate_patch, mock_get_config, mock_get_empty_tables): + namespace = "asic0" + patch_data = jsonpatch.JsonPatch([ + { + "op": "add", + "path": "/ACL_TABLE/NEW_ACL_TABLE", + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet1/mtu", + "value": "9200" + } + ]) + + original_config = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + applied_config = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + }, + "NEW_ACL_TABLE": { + "policy_desc": "New ACL Table", + "ports": [ + "Ethernet1", + "Ethernet2" + ], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9200", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + mock_get_config.side_effect = [ + original_config, + original_config, + original_config, + applied_config + ] + + mock_simulate_patch.return_value = { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": [ + "Ethernet1", "Ethernet2" + ], + "stage": "ingress", + "type": "L3" + }, + "NEW_ACL_TABLE": { + "policy_desc": "New ACL Table", + "ports": [ + "Ethernet1", + "Ethernet2" + ], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9200", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + + patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace) + + # Apply the patch and verify + patch_applier.apply(patch_data) + + # Assertions to ensure the namespace is correctly used in underlying calls + mock_ChangeApplier.assert_called_once_with(namespace=namespace) diff --git a/tests/intfutil_test.py b/tests/intfutil_test.py index 469c73e0718..f0c75a4c0f5 100644 --- a/tests/intfutil_test.py +++ b/tests/intfutil_test.py @@ -11,23 +11,23 @@ scripts_path = os.path.join(modules_path, "scripts") show_interface_status_output="""\ - Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC ---------------- --------------- ------- ----- ----- --------- --------------- ------ ------- --------------- ---------- - Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off - Ethernet16 16 100M 9100 N/A etp5 trunk up up RJ45 off - Ethernet24 24 1G 9100 N/A etp6 trunk up up QSFP28 or later off - Ethernet28 28 1000M 9100 N/A etp8 trunk up up RJ45 off - Ethernet32 13,14,15,16 40G 9100 rs etp9 
PortChannel1001 up up N/A off - Ethernet36 9,10,11,12 10M 9100 N/A etp10 routed up up RJ45 off - Ethernet112 93,94,95,96 40G 9100 rs etp29 PortChannel0001 up up N/A off - Ethernet116 89,90,91,92 40G 9100 rs etp30 PortChannel0002 up up N/A off - Ethernet120 101,102,103,104 40G 9100 rs etp31 PortChannel0003 up up N/A off - Ethernet124 97,98,99,100 40G 9100 auto etp32 PortChannel0004 up up N/A off -PortChannel0001 N/A 40G 9100 N/A N/A routed down up N/A N/A -PortChannel0002 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel0003 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel0004 N/A 40G 9100 N/A N/A routed up up N/A N/A -PortChannel1001 N/A 40G 9100 N/A N/A trunk N/A N/A N/A N/A + Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC +--------------- --------------- ------- ----- ----- --------- --------------- ------ ------- ----------------- ---------- + Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off + Ethernet16 16 100M 9100 N/A etp5 trunk up up RJ45 off + Ethernet24 24 1G 9100 N/A etp6 trunk up up DPU-NPU Data Port off + Ethernet28 28 1000M 9100 N/A etp8 trunk up up RJ45 off + Ethernet32 13,14,15,16 40G 9100 rs etp9 PortChannel1001 up up N/A off + Ethernet36 9,10,11,12 10M 9100 N/A etp10 routed up up RJ45 off + Ethernet112 93,94,95,96 40G 9100 rs etp29 PortChannel0001 up up N/A off + Ethernet116 89,90,91,92 40G 9100 rs etp30 PortChannel0002 up up N/A off + Ethernet120 101,102,103,104 40G 9100 rs etp31 PortChannel0003 up up N/A off + Ethernet124 97,98,99,100 40G 9100 auto etp32 PortChannel0004 up up N/A off +PortChannel0001 N/A 40G 9100 N/A N/A routed down up N/A N/A +PortChannel0002 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel0003 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel0004 N/A 40G 9100 N/A N/A routed up up N/A N/A +PortChannel1001 N/A 40G 9100 N/A N/A trunk N/A N/A N/A N/A """ show_interface_status_Ethernet32_output="""\ diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py index 2f262a4a09c..6003e7401a4 100644 --- a/tests/ip_config_test.py +++ b/tests/ip_config_test.py @@ -13,6 +13,7 @@ import utilities_common.bgp_util as bgp_util ERROR_MSG = "Error: IP address is not valid" +NOT_EXIST_VLAN_ERROR_MSG ="does not exist" INVALID_VRF_MSG ="""\ Usage: bind [OPTIONS] @@ -43,6 +44,37 @@ def setup_class(cls): def mock_run_bgp_command(): return "" + def test_add_vlan_interface_ipv4(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config int ip add Vlan100 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan100", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + # create vlan 4093 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["4093"], obj=db) + # config int ip add Vlan4093 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan4093", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + # config int ip add Vlan000000000000003 1.1.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan000000000000003", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + # config int ip add Vlan1.2 1.1.1.1/24 + result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], ["Vlan1.2", "1.1.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert NOT_EXIST_VLAN_ERROR_MSG in result.output + + def test_add_del_interface_valid_ipv4(self): db = Db() runner = CliRunner() @@ -67,22 +99,28 @@ def test_add_del_interface_valid_ipv4(self): assert ('Eth36.10', '32.11.10.1/24') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Ethernet64 10.10.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet64", "10.10.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet0.10 10.11.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "10.11.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet0.10', '10.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "10.11.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet0.10', '10.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Eth36.10 32.11.10.1/24 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "32.11.10.1/24"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "32.11.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') def test_add_interface_invalid_ipv4(self): db = Db() @@ -153,20 +191,26 @@ def test_add_del_interface_valid_ipv6(self): assert ('Eth36.10', '3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') # config int ip remove Ethernet72 2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet72', '2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('INTERFACE') - - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], 
obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet0.10', '1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') - - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Eth36.10', '3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet72', '2001:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('INTERFACE') + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet0.10", "1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet0.10', '1010:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Eth36.10", "3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Eth36.10', '3210:1db8:11a3:19d7:1f34:8a2e:17a0:765d/34') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') def test_del_interface_case_sensitive_ipv6(self): db = Db() @@ -177,10 +221,12 @@ def test_del_interface_case_sensitive_ipv6(self): assert ('Ethernet72', 'FC00::1/126') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet72 FC00::1/126 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "FC00::1/126"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet72', 'FC00::1/126') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet72", "FC00::1/126"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet72', 'FC00::1/126') not in db.cfgdb.get_table('INTERFACE') def test_add_interface_invalid_ipv6(self): db = Db() @@ -216,10 +262,12 @@ def test_add_del_interface_ipv6_with_leading_zeros(self): assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet68 2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') not in 
db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d/34"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet68', '2001:db8:11a3:9d7:1f34:8a2e:7a0:765d/34') not in db.cfgdb.get_table('INTERFACE') def test_add_del_interface_shortened_ipv6_with_leading_zeros(self): db = Db() @@ -233,10 +281,12 @@ def test_add_del_interface_shortened_ipv6_with_leading_zeros(self): assert ('Ethernet68', '3000::1/64') in db.cfgdb.get_table('INTERFACE') # config int ip remove Ethernet68 3000::001/64 - result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "3000::001/64"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code != 0 - assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE') + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Ethernet68", "3000::001/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet68', '3000::1/64') not in db.cfgdb.get_table('INTERFACE') def test_intf_vrf_bind_unbind(self): runner = CliRunner() diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 2889e6b202e..e967caa7585 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -79,7 +79,8 @@ "pfc_asym": "off", "mtu": "9100", "tpid": "0x8100", - "admin_status": "up" + "admin_status": "up", + "role": "Dpc" }, "PORT_TABLE:Ethernet28": { "index": "7", diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 4bb4c9d9e93..7b4cac3430e 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -287,5 +287,20 @@ "ports@": "Ethernet124", "type": "L3", "stage": "ingress" + }, + "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monCapacityThreshWarn": "100", + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshIsolation": "1", + "monPollThreshRecovery": "8" + }, + "SYSLOG_CONFIG_FEATURE|bgp": { + "rate_limit_interval": "111", + "rate_limit_burst": "33333" + }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "222", + "rate_limit_burst": "22222" } } diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 6ae0258be05..4f3f13c0ae0 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -304,5 +304,15 @@ }, "ACL_RULE_TABLE|DATAACL_5|RULE_1" : { "status": "Active" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "221580", + "missing_capacity": "132948", + "operating_links": "5", + "number_of_links": "8", + "warning_threshold": "100" + }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 1cded681491..56823ae113b 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -227,5 +227,20 @@ "holdtime": "10", "asn": "65200", "keepalive": "3" + }, + "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + 
"monCapacityThreshWarn": "100", + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshIsolation": "1", + "monPollThreshRecovery": "8" + }, + "SYSLOG_CONFIG_FEATURE|bgp": { + "rate_limit_interval": "444", + "rate_limit_burst": "44444" + }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "555", + "rate_limit_burst": "55555" } } diff --git a/tests/mock_tables/asic1/state_db.json b/tests/mock_tables/asic1/state_db.json index 7397d25b8fc..136a3329905 100644 --- a/tests/mock_tables/asic1/state_db.json +++ b/tests/mock_tables/asic1/state_db.json @@ -29,8 +29,6 @@ "media_interface_technology" : "1550 nm DFB", "vendor_rev" : "XX", "cmis_rev" : "4.1", - "active_firmware" : "X.X", - "inactive_firmware" : "X.X", "supported_max_tx_power" : "4.0", "supported_min_tx_power" : "-22.9", "supported_max_laser_freq" : "196100", @@ -70,6 +68,16 @@ "vcclowalarm": "2.9700", "vcclowwarning": "3.1349" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { + "active_firmware": "X.X", + "inactive_firmware": "X.X", + "e1_active_firmware" : "X.X", + "e1_inactive_firmware" : "Y.Y", + "e1_server_firmware" : "A.B.C.D", + "e2_active_firmware" : "X.X", + "e2_inactive_firmware" : "Y.Y", + "e2_server_firmware" : "A.B.C.D" + }, "CHASSIS_INFO|chassis 1": { "psu_num": "2" }, @@ -262,5 +270,15 @@ }, "FABRIC_PORT_TABLE|PORT7" : { "STATUS": "down" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "88632", + "missing_capacity": "265896", + "operating_links": "2", + "number_of_links": "8", + "warning_threshold": "100" + }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 2a81f96bfac..b2bf54c995a 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -97,7 +97,8 @@ "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "1000" + "speed": "1000", + "role": "Dpc" }, "PORT|Ethernet28": { "admin_status": "up", @@ -793,13 +794,19 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", - "set_owner": "local" + "set_owner": "local", + "support_syslog_rate_limit": "true", + "has_global_scope": "false", + "has_per_asic_scope": "true" }, "FEATURE|database": { "state": "always_enabled", "auto_restart": "always_enabled", "high_mem_alert": "disabled", - "set_owner": "local" + "set_owner": "local", + "support_syslog_rate_limit": "true", + "has_global_scope": "true", + "has_per_asic_scope": "true" }, "FEATURE|dhcp_relay": { "state": "enabled", @@ -823,7 +830,10 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", - "set_owner": "kube" + "set_owner": "kube", + "support_syslog_rate_limit": "true", + "has_global_scope": "true", + "has_per_asic_scope": "false" }, "FEATURE|radv": { "state": "enabled", @@ -873,6 +883,14 @@ "high_mem_alert": "disabled", "set_owner": "kube" }, + "SYSLOG_CONFIG_FEATURE|database": { + "rate_limit_interval": "200", + "rate_limit_burst": "20000" + }, + "SYSLOG_CONFIG_FEATURE|pmon": { + "rate_limit_interval": "100", + "rate_limit_burst": "10000" + }, "DEVICE_METADATA|localhost": { "default_bgp_status": "down", "default_pfcwd_status": "enable", @@ -2685,6 +2703,7 @@ "direction": "RX" }, "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monCapacityThreshWarn": "100", "monErrThreshCrcCells": "1", "monErrThreshRxCells": "61035156", "monPollThreshIsolation": "1", @@ -2692,19 +2711,52 @@ }, "FABRIC_PORT|Fabric0": { "alias": "Fabric0", + "forceUnisolateStatus": "0", 
"isolateStatus": "False", "lanes": "0" }, "FABRIC_PORT|Fabric1": { "alias": "Fabric1", + "forceUnisolateStatus": "0", "isolateStatus": "False", "lanes": "1" }, "FABRIC_PORT|Fabric2": { "alias": "Fabric2", + "forceUnisolateStatus": "0", "isolateStatus": "False", "lanes": "2" }, + "FABRIC_PORT|Fabric3": { + "alias": "Fabric3", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "3" + }, + "FABRIC_PORT|Fabric4": { + "alias": "Fabric4", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "4" + }, + "FABRIC_PORT|Fabric5": { + "alias": "Fabric5", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "5" + }, + "FABRIC_PORT|Fabric6": { + "alias": "Fabric6", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "6" + }, + "FABRIC_PORT|Fabric7": { + "alias": "Fabric7", + "forceUnisolateStatus": "0", + "isolateStatus": "False", + "lanes": "7" + }, "DHCP_RELAY|Vlan1000": { "dhcpv6_servers": [ "fc02:2000::1" diff --git a/tests/mock_tables/mock_single_asic.py b/tests/mock_tables/mock_single_asic.py index 08c2157c9de..ac97c4bc79a 100644 --- a/tests/mock_tables/mock_single_asic.py +++ b/tests/mock_tables/mock_single_asic.py @@ -4,6 +4,8 @@ from sonic_py_common import multi_asic from utilities_common import multi_asic as multi_asic_util +add_unknown_intf=False + mock_intf_table = { '': { 'eth0': { @@ -60,6 +62,8 @@ def mock_single_asic_get_ip_intf_from_ns(namespace): interfaces = [] try: interfaces = list(mock_intf_table[namespace].keys()) + if add_unknown_intf: + interfaces.append("unknownintf") except KeyError: pass return interfaces @@ -70,7 +74,8 @@ def mock_single_asic_get_ip_intf_addr_from_ns(namespace, iface): try: ipaddresses = mock_intf_table[namespace][iface] except KeyError: - pass + if add_unknown_intf: + raise ValueError("Unknow interface") return ipaddresses diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 993e05c4806..49ffaeedd89 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -173,24 +173,6 @@ "nominal_bit_rate": "N/A", "application_advertisement": "N/A" }, - "TRANSCEIVER_INFO|Ethernet24": { - "type": "QSFP28 or later", - "hardware_rev": "AC", - "serial": "MT1706FT02066", - "manufacturer": "Mellanox", - "model": "MFA1A00-C003", - "vendor_oui": "00-02-c9", - "vendor_date": "2017-01-13 ", - "connector": "No separable connector", - "encoding": "64B66B", - "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", - "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", - "cable_type": "Length Cable Assembly(m)", - "cable_length": "3", - "specification_compliance": "{'10/40G Ethernet Compliance Code': '40G Active Cable (XLPPI)'}", - "nominal_bit_rate": "255", - "application_advertisement": "N/A" - }, "TRANSCEIVER_INFO|Ethernet28": { "type": "RJ45", "hardware_rev": "N/A", @@ -374,6 +356,7 @@ "rx_sig_power_max": "40" }, "TRANSCEIVER_STATUS|Ethernet44":{ + "cmis_state": "READY", "DP1State": "DataPathActivated", "DP2State": "DataPathActivated", "DP3State": "DataPathActivated", @@ -684,16 +667,23 @@ "media_interface_technology" : "1550 nm DFB", "vendor_rev" : "XX", "cmis_rev" : "4.1", - "active_firmware" : "X.X", - "inactive_firmware" : "X.X", "supported_max_tx_power" : "4.0", "supported_min_tx_power" : "-22.9", "supported_max_laser_freq" : "196100", "supported_min_laser_freq" : "191300" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet64": { + "active_firmware": "X.X", + "inactive_firmware": "X.X", + "e1_active_firmware" : "X.X", + "e1_inactive_firmware" 
: "Y.Y", + "e1_server_firmware" : "A.B.C.D", + "e2_active_firmware" : "X.X", + "e2_inactive_firmware" : "Y.Y", + "e2_server_firmware" : "A.B.C.D" + }, "TRANSCEIVER_INFO|Ethernet72": { "active_apsel_hostlane4": "N/A", - "active_firmware": "0.0", "is_replaceable": "True", "application_advertisement": "{1: {'host_electrical_interface_id': 'IB NDR', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 4, 'host_lane_count': 4, 'host_lane_assignment_options': 17}, 2: {'host_electrical_interface_id': 'IB SDR (Arch.Spec.Vol.2)', 'module_media_interface_id': 'Copper cable', 'media_lane_count': 4, 'host_lane_count': 4, 'host_lane_assignment_options': 17}}", "host_electrical_interface": "N/A", @@ -710,7 +700,6 @@ "supported_min_laser_freq": "N/A", "serial": "serial1 ", "active_apsel_hostlane7": "N/A", - "inactive_firmware": "N/A", "active_apsel_hostlane1": "N/A", "type": "OSFP 8X Pluggable Transceiver", "cable_length": "1.0", @@ -796,6 +785,10 @@ "txbiaslowalarm": "N/A", "txbiaslowwarning": "N/A" }, + "TRANSCEIVER_FIRMWARE_INFO|Ethernet72": { + "active_firmware": "0.0", + "inactive_firmware": "N/A" + }, "TRANSCEIVER_STATUS|Ethernet0": { "status": "67", "error": "Blocking Error|High temperature" @@ -1678,5 +1671,15 @@ "minimum_current": "339", "timestamp": "20230704 17:38:04", "warning_status": "False" + }, + "FABRIC_CAPACITY_TABLE|FABRIC_CAPACITY_DATA" : { + "fabric_capacity": "88632", + "missing_capacity": "265896", + "operating_links": "2", + "number_of_links": "8", + "warning_threshold": "100" + }, + "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { + "capacity": "80000" } } diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 3b38add9ff7..1f92b3d19ae 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -1,14 +1,17 @@ import copy from io import StringIO import json -import os import logging -import sys import syslog +import sys import time from sonic_py_common import device_info from unittest.mock import MagicMock, patch -from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, APPL_STATE_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES +from tests.route_check_test_data import ( + APPL_DB, MULTI_ASIC, NAMESPACE, DEFAULTNS, ARGS, ASIC_DB, CONFIG_DB, + DEFAULT_CONFIG_DB, APPL_STATE_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, + UPD, FRR_ROUTES +) import pytest @@ -18,50 +21,37 @@ import route_check current_test_data = None - -tables_returned = {} selector_returned = None subscribers_returned = {} +db_conns = {} def set_test_case_data(ctdata): - """ - Setup global variables for each test case - """ - global current_test_data, tables_returned, selector_returned, subscribers_returned - + global current_test_data, db_conns, selector_returned, subscribers_returned current_test_data = ctdata - tables_returned = {} - selector_returned = None subscribers_returned = {} - def recursive_update(d, t): - assert (type(t) is dict) + assert type(t) is dict for k in t.keys(): if type(t[k]) is not dict: d.update(t) return - if k not in d: d[k] = {} recursive_update(d[k], t[k]) - class Table: - def __init__(self, db, tbl): self.db = db self.tbl = tbl - self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db, tbl])) - # print("Table:init: db={} tbl={} data={}".format(db, tbl, json.dumps(self.data, indent=4))) - + self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db["namespace"], db["name"], tbl])) def update(self): t = copy.deepcopy(self.get_val(current_test_data.get(UPD, {}), 
- [self.db, self.tbl, OP_SET])) + [self.db["namespace"], self.db["name"], self.tbl, OP_SET])) drop = copy.deepcopy(self.get_val(current_test_data.get(UPD, {}), - [self.db, self.tbl, OP_DEL])) + [self.db["namespace"], self.db["name"], self.tbl, OP_DEL])) if t: recursive_update(self.data, t) @@ -69,41 +59,41 @@ def update(self): self.data.pop(k, None) return (list(t.keys()), list(drop.keys())) - def get_val(self, d, keys): for k in keys: d = d[k] if k in d else {} return d - def getKeys(self): return list(self.data.keys()) - def get(self, key): ret = copy.deepcopy(self.data.get(key, {})) return (True, ret) - def hget(self, key, field): ret = copy.deepcopy(self.data.get(key, {}).get(field, {})) return True, ret +def conn_side_effect(arg, _1, _2, namespace): + return db_conns[namespace][arg] -db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "APPL_STATE_DB": APPL_STATE_DB } -def conn_side_effect(arg, _): - return db_conns[arg] - +def init_db_conns(namespaces): + for ns in namespaces: + db_conns[ns] = { + "APPL_DB": {"namespace": ns, "name": APPL_DB}, + "ASIC_DB": {"namespace": ns, "name": ASIC_DB}, + "APPL_STATE_DB": {"namespace": ns, "name": APPL_STATE_DB}, + "CONFIG_DB": ConfigDB(ns) + } def table_side_effect(db, tbl): - if not db in tables_returned: - tables_returned[db] = {} - if not tbl in tables_returned[db]: - tables_returned[db][tbl] = Table(db, tbl) - return tables_returned[db][tbl] + if not tbl in db.keys(): + db[tbl] = Table(db, tbl) + return db[tbl] -class mock_selector: +class MockSelector: TIMEOUT = 1 EMULATE_HANG = False @@ -111,21 +101,19 @@ def __init__(self): self.select_state = 0 self.select_cnt = 0 self.subs = None - # print("Mock Selector constructed") - + logger.debug("Mock Selector constructed") def addSelectable(self, subs): self.subs = subs return 0 - def select(self, timeout): # Toggle between good & timeout # state = self.select_state self.subs.update() - if mock_selector.EMULATE_HANG: + if MockSelector.EMULATE_HANG: time.sleep(60) if self.select_state == 0: @@ -136,29 +124,15 @@ def select(self, timeout): return (state, None) -class mock_db_conn: - def __init__(self, db): - self.db_name = None - for (k, v) in db_conns.items(): - if v == db: - self.db_name = k - assert self.db_name != None - - def getDbName(self): - return self.db_name - - -class mock_subscriber: +class MockSubscriber: def __init__(self, db, tbl): self.state = PRE self.db = db self.tbl = tbl - self.dbconn = mock_db_conn(db) self.mock_tbl = table_side_effect(self.db, self.tbl) self.set_keys = list(self.mock_tbl.data.keys()) self.del_keys = [] - def update(self): if self.state == PRE: s_keys, d_keys = self.mock_tbl.update() @@ -166,7 +140,6 @@ def update(self): self.del_keys += d_keys self.state = UPD - def pop(self): v = None if self.set_keys: @@ -180,61 +153,53 @@ def pop(self): k = "" op = "" - print("state={} k={} op={} v={}".format(self.state, k, op, str(v))) return (k, op, v) - - - def getDbConnector(self): - return self.dbconn - - - def getTableName(self): - return self.tbl - def subscriber_side_effect(db, tbl): global subscribers_returned - - key = "db_{}_tbl_{}".format(db, tbl) + key = "db_{}_{}_tbl_{}".format(db["namespace"], db["name"], tbl) if not key in subscribers_returned: - subscribers_returned[key] = mock_subscriber(db, tbl) + subscribers_returned[key] = MockSubscriber(db, tbl) return subscribers_returned[key] - def select_side_effect(): global selector_returned if not selector_returned: - selector_returned = mock_selector() + selector_returned = MockSelector() return 
selector_returned +def config_db_side_effect(namespace): + return db_conns[namespace]["CONFIG_DB"] -def table_side_effect(db, tbl): - if not db in tables_returned: - tables_returned[db] = {} - if not tbl in tables_returned[db]: - tables_returned[db][tbl] = Table(db, tbl) - return tables_returned[db][tbl] - +class ConfigDB: + def __init__(self, namespace): + self.namespace = namespace + self.name = CONFIG_DB + self.db = current_test_data.get(PRE, {}).get(namespace, {}).get(CONFIG_DB, DEFAULT_CONFIG_DB) if current_test_data is not None else DEFAULT_CONFIG_DB -def config_db_side_effect(table): - if CONFIG_DB not in current_test_data[PRE]: - return DEFAULT_CONFIG_DB[table] - if not CONFIG_DB in tables_returned: - tables_returned[CONFIG_DB] = {} - if not table in tables_returned[CONFIG_DB]: - tables_returned[CONFIG_DB][table] = current_test_data[PRE][CONFIG_DB].get(table, {}) - return tables_returned[CONFIG_DB][table] + def get_table(self, table): + return self.db.get(table, {}) + def get_entry(self, table, key): + return self.get_table(table).get(key, {}) def set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db): mock_conn.side_effect = conn_side_effect mock_table.side_effect = table_side_effect mock_sel.side_effect = select_side_effect mock_subs.side_effect = subscriber_side_effect - mock_config_db.get_table = MagicMock(side_effect=config_db_side_effect) + mock_config_db.side_effect = config_db_side_effect class TestRouteCheck(object): + @staticmethod + def extract_namespace_from_args(args): + # args: ['show', 'ip', 'route', '-n', 'asic0', 'json'], + for i, arg in enumerate(args): + if arg == "-n" and i + 1 < len(args): + return args[i + 1] + return DEFAULTNS + def setup(self): pass @@ -246,21 +211,20 @@ def init(self): def force_hang(self): old_timeout = route_check.TIMEOUT_SECONDS route_check.TIMEOUT_SECONDS = 5 - mock_selector.EMULATE_HANG = True + MockSelector.EMULATE_HANG = True yield route_check.TIMEOUT_SECONDS = old_timeout - mock_selector.EMULATE_HANG = False + MockSelector.EMULATE_HANG = False @pytest.fixture def mock_dbs(self): - mock_config_db = MagicMock() with patch("route_check.swsscommon.DBConnector") as mock_conn, \ patch("route_check.swsscommon.Table") as mock_table, \ patch("route_check.swsscommon.Select") as mock_sel, \ patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \ - patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \ + patch("sonic_py_common.multi_asic.connect_config_db_for_ns") as mock_config_db, \ patch("route_check.swsscommon.NotificationProducer"): device_info.get_platform = MagicMock(return_value='unittest') set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db) @@ -268,42 +232,53 @@ def mock_dbs(self): @pytest.mark.parametrize("test_num", TEST_DATA.keys()) def test_route_check(self, mock_dbs, test_num): + logger.debug("test_route_check: test_num={}".format(test_num)) self.init() ret = 0 - ct_data = TEST_DATA[test_num] set_test_case_data(ct_data) - logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR])) + self.run_test(ct_data) + def run_test(self, ct_data): with patch('sys.argv', ct_data[ARGS].split()), \ - patch('route_check.subprocess.check_output') as mock_check_output: + patch('sonic_py_common.multi_asic.get_namespace_list', return_value= ct_data[NAMESPACE]), \ + patch('sonic_py_common.multi_asic.is_multi_asic', return_value= ct_data[MULTI_ASIC]), \ + patch('route_check.subprocess.check_output', side_effect=lambda *args, **kwargs: self.mock_check_output(ct_data, 
*args, **kwargs)), \ + patch('route_check.mitigate_installed_not_offloaded_frr_routes', side_effect=lambda *args, **kwargs: None), \ + patch('route_check.load_db_config', side_effect=lambda: init_db_conns(ct_data[NAMESPACE])): - routes = ct_data.get(FRR_ROUTES, {}) + ret, res = route_check.main() + self.assert_results(ct_data, ret, res) - def side_effect(*args, **kwargs): - return json.dumps(routes) + def mock_check_output(self, ct_data, *args, **kwargs): + ns = self.extract_namespace_from_args(args[0]) + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) - mock_check_output.side_effect = side_effect + def assert_results(self, ct_data, ret, res): + expect_ret = ct_data.get(RET, 0) + expect_res = ct_data.get(RESULT, None) - ret, res = route_check.main() - expect_ret = ct_data[RET] if RET in ct_data else 0 - expect_res = ct_data[RESULT] if RESULT in ct_data else None - if res: - print("res={}".format(json.dumps(res, indent=4))) - if expect_res: - print("expect_res={}".format(json.dumps(expect_res, indent=4))) - assert ret == expect_ret - assert res == expect_res + if res: + logger.debug("res={}".format(json.dumps(res, indent=4))) + if expect_res: + logger.debug("expect_res={}".format(json.dumps(expect_res, indent=4))) + + assert ret == expect_ret + assert res == expect_res def test_timeout(self, mock_dbs, force_hang): # Test timeout ex_raised = False # Use an expected failing test case to trigger the select - set_test_case_data(TEST_DATA['2']) - + ct_data = TEST_DATA['2'] + set_test_case_data(ct_data) try: - with patch('sys.argv', [route_check.__file__.split('/')[-1]]): + with patch('sys.argv', [route_check.__file__.split('/')[-1]]), \ + patch('route_check.load_db_config', side_effect=lambda: init_db_conns(ct_data[NAMESPACE])): + ret, res = route_check.main() + except Exception as err: ex_raised = True expect = "timeout occurred" @@ -324,9 +299,11 @@ def test_logging(self): assert len(msg) == 5 def test_mitigate_routes(self, mock_dbs): + namespace = DEFAULTNS missed_frr_rt = [ { 'prefix': '192.168.0.1', 'protocol': 'bgp' } ] rt_appl = [ '192.168.0.1' ] + init_db_conns([namespace]) with patch('sys.stdout', new_callable=StringIO) as mock_stdout: - route_check.mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl) + route_check.mitigate_installed_not_offloaded_frr_routes(namespace, missed_frr_rt, rt_appl) # Verify that the stdout are suppressed in this function assert not mock_stdout.getvalue() diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 9250c54ca90..c5a606cb901 100644 --- a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -1,4 +1,6 @@ DESCR = "Description" +MULTI_ASIC = "multi_asic" +NAMESPACE = "namespace-list" ARGS = "args" RET = "return" APPL_DB = 0 @@ -9,6 +11,9 @@ UPD = "update" FRR_ROUTES = "frr-routes" RESULT = "res" +DEFAULTNS="" +ASIC0 = "asic0" +ASIC1 = "asic1" OP_SET = "SET" OP_DEL = "DEL" @@ -18,6 +23,7 @@ VNET_ROUTE_TABLE = 'VNET_ROUTE_TABLE' INTF_TABLE = 'INTF_TABLE' RT_ENTRY_TABLE = 'ASIC_STATE' +FEATURE_TABLE = 'FEATURE' SEPARATOR = ":" DEVICE_METADATA = "DEVICE_METADATA" MUX_CABLE = "MUX_CABLE" @@ -27,71 +33,91 @@ RT_ENTRY_KEY_PREFIX = 'SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest":\"' RT_ENTRY_KEY_SUFFIX = '\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000023\"}' -DEFAULT_CONFIG_DB = {DEVICE_METADATA: {LOCALHOST: {}}} +DEFAULT_CONFIG_DB = { + DEVICE_METADATA: { + LOCALHOST: { + } + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } + } + } TEST_DATA = { "0": { 
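+        # DB state under PRE/UPD is now keyed by namespace (DEFAULTNS here; asic names for multi-asic cases)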
DESCR: "basic good one", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "1": { DESCR: "With updates", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m DEBUG -i 1", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, UPD: { - ASIC_DB: { - RT_ENTRY_TABLE: { - OP_SET: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - }, - OP_DEL: { - RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + ASIC_DB: { + RT_ENTRY_TABLE: { + OP_SET: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + }, 
+ OP_DEL: { + RT_ENTRY_KEY_PREFIX + "10.10.10.10/32" + RT_ENTRY_KEY_SUFFIX: {} + } } } } @@ -99,506 +125,987 @@ }, "2": { DESCR: "basic failure one", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -i 15", RET: -1, PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - INTF_TABLE: { - "PortChannel1013:90.10.196.24/31": {}, - "PortChannel1023:9603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "3603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:90.10.196.24/31": {}, + "PortChannel1023:9603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "3603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.12/31", - "10.10.196.20/31" - ], - "missed_INTF_TABLE_entries": [ - "90.10.196.24/32", - "9603:10b0:503:df4::5d/128" - ], - "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ - "20.10.196.12/31", - "20.10.196.20/31", - "20.10.196.24/32", - "3603:10b0:503:df4::5d/128" - ] + DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31", + "10.10.196.20/31" + ], + "missed_INTF_TABLE_entries": [ + "90.10.196.24/32", + "9603:10b0:503:df4::5d/128" + ], + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "20.10.196.12/31", + "20.10.196.20/31", + "20.10.196.24/32", + "3603:10b0:503:df4::5d/128" + ] + } } }, "3": { DESCR: "basic good one with no args", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + 
"PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "4": { DESCR: "Good one with routes on voq inband interface", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" }, - "10.10.197.1" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}, - "2603:10b0:503:df5::1" : { "ifname": "Ethernet-IB0", "nexthop": "::"}, - "100.0.0.2/32" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0" }, - "2064:100::2/128" : { "ifname": "Ethernet-IB0", "nexthop": "::" }, - "101.0.0.0/24" : { "ifname": "Ethernet-IB0", "nexthop": "100.0.0.2"} - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {}, - "Ethernet-IB0:10.10.197.1/24": {}, - "Ethernet-IB0:2603:10b0:503:df5::1/64": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.197.1/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df5::1/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "101.0.0.0/24" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" }, + "10.10.197.1" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}, + "2603:10b0:503:df5::1" : { "ifname": "Ethernet-IB0", "nexthop": "::"}, + "100.0.0.2/32" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0" }, + "2064:100::2/128" : { "ifname": "Ethernet-IB0", "nexthop": "::" }, + "101.0.0.0/24" : { "ifname": "Ethernet-IB0", "nexthop": "100.0.0.2"} + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {}, + "Ethernet-IB0:10.10.197.1/24": {}, + "Ethernet-IB0:2603:10b0:503:df5::1/64": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.197.1/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df5::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "101.0.0.0/24" + RT_ENTRY_KEY_SUFFIX: {} + } } } } }, "5": { DESCR: "local route with nexthop - fail", + MULTI_ASIC: False, + NAMESPACE: 
[''], ARGS: "route_check -m INFO -i 1000", RET: -1, PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo", "nexthop": "100.0.0.2" } - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo", "nexthop": "100.0.0.2" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.30/31" - ] + DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.30/31" + ] + } } }, "6": { DESCR: "Good one with VNET routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "10.10.196.30/31" : { "ifname": "lo" } - }, - VNET_ROUTE_TABLE: { - "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, - "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, - "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {}, - "Vlan3001": { "vnet_name": "Vnet1" }, - "Vlan3001:30.1.10.1/24": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "30.1.10.1/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {} + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + 
    "6": {
         DESCR: "Good one with VNET routes",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
         ARGS: "route_check",
         PRE: {
-            APPL_DB: {
-                ROUTE_TABLE: {
-                    "0.0.0.0/0" : { "ifname": "portchannel0" },
-                    "10.10.196.12/31" : { "ifname": "portchannel0" },
-                    "10.10.196.20/31" : { "ifname": "portchannel0" },
-                    "10.10.196.30/31" : { "ifname": "lo" }
-                },
-                VNET_ROUTE_TABLE: {
-                    "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" },
-                    "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" },
-                    "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" }
-                },
-                INTF_TABLE: {
-                    "PortChannel1013:10.10.196.24/31": {},
-                    "PortChannel1023:2603:10b0:503:df4::5d/126": {},
-                    "PortChannel1024": {},
-                    "Vlan3001": { "vnet_name": "Vnet1" },
-                    "Vlan3001:30.1.10.1/24": {}
-                }
-            },
-            ASIC_DB: {
-                RT_ENTRY_TABLE: {
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "30.1.10.1/32" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}
+            DEFAULTNS: {
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.12/31" : { "ifname": "portchannel0" },
+                        "10.10.196.20/31" : { "ifname": "portchannel0" },
+                        "10.10.196.30/31" : { "ifname": "lo" }
+                    },
+                    VNET_ROUTE_TABLE: {
+                        "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" },
+                        "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" },
+                        "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" }
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {},
+                        "Vlan3001": { "vnet_name": "Vnet1" },
+                        "Vlan3001:30.1.10.1/24": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "30.1.10.1/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
                 }
             }
         }
     },
     "7": {
         DESCR: "dualtor standalone tunnel route case",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
         ARGS: "route_check",
         PRE: {
-            CONFIG_DB: {
-                DEVICE_METADATA: {
-                    LOCALHOST: {"subtype": "DualToR"}
-                }
-            },
-            APPL_DB: {
-                NEIGH_TABLE: {
-                    "Vlan1000:fc02:1000::99": { "neigh": "00:00:00:00:00:00", "family": "IPv6"}
-                }
-            },
-            ASIC_DB: {
-                RT_ENTRY_TABLE: {
-                    RT_ENTRY_KEY_PREFIX + "fc02:1000::99/128" + RT_ENTRY_KEY_SUFFIX: {},
+            DEFAULTNS: {
+                CONFIG_DB: {
+                    DEVICE_METADATA: {
+                        LOCALHOST: {"subtype": "DualToR"}
+                    },
+                    FEATURE_TABLE: {
+                        "bgp": {
+                            "state": "enabled"
+                        }
+                    }
+                },
+                APPL_DB: {
+                    NEIGH_TABLE: {
+                        "Vlan1000:fc02:1000::99": { "neigh": "00:00:00:00:00:00", "family": "IPv6"}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "fc02:1000::99/128" + RT_ENTRY_KEY_SUFFIX: {},
+                    }
                 }
             }
         }
     },
     "8": {
         DESCR: "Good one with VRF routes",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
         ARGS: "route_check",
         PRE: {
-            APPL_DB: {
-                ROUTE_TABLE: {
-                    "Vrf1:0.0.0.0/0" : { "ifname": "portchannel0" },
-                    "Vrf1:10.10.196.12/31" : { "ifname": "portchannel0" },
-                    "Vrf1:10.10.196.20/31" : { "ifname": "portchannel0" }
-                },
-                INTF_TABLE: {
-                    "PortChannel1013:10.10.196.24/31": {},
-                    "PortChannel1023:2603:10b0:503:df4::5d/126": {},
-                    "PortChannel1024": {}
-                }
-            },
-            ASIC_DB: {
-                RT_ENTRY_TABLE: {
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+            DEFAULTNS: {
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "Vrf1:0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "Vrf1:10.10.196.12/31" : { "ifname": "portchannel0" },
+                        "Vrf1:10.10.196.20/31" : { "ifname": "portchannel0" }
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
                 }
             }
         }
     },
     "9": {
         DESCR: "SOC IPs on Libra ToRs should be ignored",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
         ARGS: "route_check",
         PRE: {
-            CONFIG_DB: {
-                DEVICE_METADATA: {
-                    LOCALHOST: {"subtype": "DualToR"}
-                },
-                MUX_CABLE: {
-                    "Ethernet4": {
-                        "cable_type": "active-active",
-                        "server_ipv4": "192.168.0.2/32",
-                        "server_ipv6": "fc02:1000::2/128",
-                        "soc_ipv4": "192.168.0.3/32",
-                        "soc_ipv6": "fc02:1000::3/128",
-                        "state": "auto"
+            DEFAULTNS: {
+                CONFIG_DB: {
+                    DEVICE_METADATA: {
+                        LOCALHOST: {"subtype": "DualToR"}
                     },
-                }
-            },
-            APPL_DB: {
-                ROUTE_TABLE: {
-                    "192.168.0.2/32": {"ifname": "tun0"},
-                    "fc02:1000::2/128": {"ifname": "tun0"}
-                }
-            },
-            ASIC_DB: {
-                RT_ENTRY_TABLE: {
-                    RT_ENTRY_KEY_PREFIX + "192.168.0.2/32" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "fc02:1000::2/128" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "192.168.0.3/32" + RT_ENTRY_KEY_SUFFIX: {},
-                    RT_ENTRY_KEY_PREFIX + "fc02:1000::3/128" + RT_ENTRY_KEY_SUFFIX: {}
+                    MUX_CABLE: {
+                        "Ethernet4": {
+                            "cable_type": "active-active",
+                            "server_ipv4": "192.168.0.2/32",
+                            "server_ipv6": "fc02:1000::2/128",
+                            "soc_ipv4": "192.168.0.3/32",
+                            "soc_ipv6": "fc02:1000::3/128",
+                            "state": "auto"
+                        },
+                    },
+                    FEATURE_TABLE: {
+                        "bgp": {
+                            "state": "enabled"
+                        }
+                    }
+                },
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "192.168.0.2/32": {"ifname": "tun0"},
+                        "fc02:1000::2/128": {"ifname": "tun0"}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "192.168.0.2/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "fc02:1000::2/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "192.168.0.3/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "fc02:1000::3/128" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
                 }
             }
         }
     },
DESCR: "failure test case, missing FRR routes", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} - } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, }, }, FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - }, - ], - "1.1.1.0/24": [ - { - "prefix": "1.1.1.0/24", - "vrfName": "default", - "protocol": "static", - "selected": True, - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - "selected": True, - }, - ], + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + }, + ], + "1.1.1.0/24": [ + { + "prefix": "1.1.1.0/24", + "vrfName": "default", + "protocol": "static", + "selected": True, + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + }, }, RESULT: { - "missed_FRR_routes": [ - {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True}, - {"prefix": "1.1.1.0/24", "vrfName": "default", "protocol": "static", "selected": True}, - ], + DEFAULTNS: { + "missed_FRR_routes": [ + {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True}, + {"prefix": "1.1.1.0/24", "vrfName": "default", "protocol": "static", "selected": True}, + ], + }, }, RET: -1, }, "12": { DESCR: "skip bgp routes offloaded check, if not selected as best", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { - "0.0.0.0/0" : { "ifname": "portchannel0" }, - "10.10.196.12/31" : { "ifname": "portchannel0" }, - }, - INTF_TABLE: { - "PortChannel1013:10.10.196.24/31": {}, - "PortChannel1023:2603:10b0:503:df4::5d/126": {}, - "PortChannel1024": {} - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + 
RT_ENTRY_KEY_SUFFIX: {} - } + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, }, }, FRR_ROUTES: { - "0.0.0.0/0": [ - { - "prefix": "0.0.0.0/0", - "vrfName": "default", - "protocol": "bgp", - "selected": True, - "offloaded": True, - }, - ], - "10.10.196.12/31": [ - { - "prefix": "10.10.196.12/31", - "vrfName": "default", - "protocol": "bgp", - }, - ], - "10.10.196.24/31": [ - { - "protocol": "connected", - "selected": True, - }, - ], + DEFAULTNS: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "selected": True, + "offloaded": True, + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + "selected": True, + }, + ], + }, }, }, "13": { DESCR: "basic good one with IPv6 address", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -m INFO -i 1000", PRE: { - APPL_DB: { - ROUTE_TABLE: { + DEFAULTNS: { + APPL_DB: { + ROUTE_TABLE: { + }, + INTF_TABLE: { + "PortChannel1013:2000:31:0:0::1/64": {}, + } }, - INTF_TABLE: { - "PortChannel1013:2000:31:0:0::1/64": {}, - } - }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + } } } } }, "14": { DESCR: "dualtor ignore vlan neighbor route miss case", + MULTI_ASIC: False, + NAMESPACE: [''], ARGS: "route_check -i 15", RET: -1, PRE: { - CONFIG_DB: { - DEVICE_METADATA: { - LOCALHOST: {"subtype": "DualToR"} + DEFAULTNS: { + CONFIG_DB: { + DEVICE_METADATA: { + LOCALHOST: {"subtype": "DualToR"} + }, + FEATURE_TABLE: { + "bgp": { + "state": "enabled" + } + } + }, + APPL_DB: { + ROUTE_TABLE: { + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "192.168.0.101/32": { "ifname": "tun0" }, + "192.168.0.103/32": { "ifname": "tun0" }, + }, + INTF_TABLE: { + "PortChannel1013:90.10.196.24/31": {}, + "PortChannel1023:9603:10b0:503:df4::5d/126": {}, + }, + NEIGH_TABLE: { + "Vlan1000:192.168.0.100": {}, + "Vlan1000:192.168.0.101": {}, + "Vlan1000:192.168.0.102": {}, + "Vlan1000:192.168.0.103": {}, + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "192.168.0.101/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "192.168.0.102/32" + RT_ENTRY_KEY_SUFFIX: {}, + } } - }, - APPL_DB: { - ROUTE_TABLE: { - "10.10.196.12/31" : { "ifname": "portchannel0" }, - "10.10.196.20/31" : { "ifname": "portchannel0" }, - "192.168.0.101/32": { "ifname": "tun0" }, - "192.168.0.103/32": { "ifname": "tun0" }, - }, - INTF_TABLE: { - "PortChannel1013:90.10.196.24/31": {}, - "PortChannel1023:9603:10b0:503:df4::5d/126": {}, - }, - NEIGH_TABLE: { - 
"Vlan1000:192.168.0.100": {}, - "Vlan1000:192.168.0.101": {}, - "Vlan1000:192.168.0.102": {}, - "Vlan1000:192.168.0.103": {}, + } + }, + RESULT: { + DEFAULTNS: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31", + "10.10.196.20/31" + ], + "missed_INTF_TABLE_entries": [ + "90.10.196.24/32", + "9603:10b0:503:df4::5d/128" + ], + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "20.10.196.12/31", + "20.10.196.20/31", + "20.10.196.24/32", + ] + } + } + }, + "15": { + DESCR: "basic good one on multi-asic on a particular asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic0 -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + } + } + }, + "16": { + DESCR: "basic good one on multi-asic on all asics", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } }, - ASIC_DB: { - RT_ENTRY_TABLE: { - RT_ENTRY_KEY_PREFIX + "20.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "20.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "192.168.0.101/32" + RT_ENTRY_KEY_SUFFIX: {}, - RT_ENTRY_KEY_PREFIX + "192.168.0.102/32" + RT_ENTRY_KEY_SUFFIX: {}, + ASIC1: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + } + }, + "17": { + DESCR: "simple failure case on 
multi-asic on a particular asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic0 -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } } } }, RESULT: { - "missed_ROUTE_TABLE_routes": [ - "10.10.196.12/31", - "10.10.196.20/31" - ], - "missed_INTF_TABLE_entries": [ - "90.10.196.24/32", - "9603:10b0:503:df4::5d/128" - ], - "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ - "20.10.196.12/31", - "20.10.196.20/31", - "20.10.196.24/32", - ] - } + ASIC0: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31" + ], + } + }, + RET: -1, + }, + "18": { + DESCR: "simple failure case on multi-asic on all asics", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -m INFO -i 1000", + PRE: { + ASIC0: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + ASIC1: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + }, + RESULT: { + ASIC0: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.12/31" + ], + }, + ASIC1: { + "Unaccounted_ROUTE_ENTRY_TABLE_entries": [ + "10.10.196.12/31" + ], + }, + }, + RET: -1, + }, + "19": { + DESCR: "validate namespace input on multi-asic", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n random -m INFO -i 1000", + RET: -1, + }, + "20": { + DESCR: "validate namespace input on single-asic", + MULTI_ASIC: False, + NAMESPACE: [''], + ARGS: "route_check -n random -m INFO -i 1000", + RET: -1, + }, + "21": { + DESCR: "multi-asic failure test case, missing FRR routes", + MULTI_ASIC: True, + NAMESPACE: ['asic0', 'asic1'], + ARGS: "route_check -n asic1 -m INFO -i 1000", + PRE: { + 
+    "18": {
+        DESCR: "simple failure case on multi-asic on all asics",
+        MULTI_ASIC: True,
+        NAMESPACE: ['asic0', 'asic1'],
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            ASIC0: {
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.12/31" : { "ifname": "portchannel0" },
+                        "10.10.196.20/31" : { "ifname": "portchannel0" },
+                        "10.10.196.30/31" : { "ifname": "lo" }
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
+                }
+            },
+            ASIC1: {
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.20/31" : { "ifname": "portchannel0" },
+                        "10.10.196.30/31" : { "ifname": "lo" }
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
+                }
+            },
+        },
+        RESULT: {
+            ASIC0: {
+                "missed_ROUTE_TABLE_routes": [
+                    "10.10.196.12/31"
+                ],
+            },
+            ASIC1: {
+                "Unaccounted_ROUTE_ENTRY_TABLE_entries": [
+                    "10.10.196.12/31"
+                ],
+            },
+        },
+        RET: -1,
+    },
+    "19": {
+        DESCR: "validate namespace input on multi-asic",
+        MULTI_ASIC: True,
+        NAMESPACE: ['asic0', 'asic1'],
+        ARGS: "route_check -n random -m INFO -i 1000",
+        RET: -1,
+    },
+    "20": {
+        DESCR: "validate namespace input on single-asic",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
+        ARGS: "route_check -n random -m INFO -i 1000",
+        RET: -1,
+    },
+    "21": {
+        DESCR: "multi-asic failure test case, missing FRR routes",
+        MULTI_ASIC: True,
+        NAMESPACE: ['asic0', 'asic1'],
+        ARGS: "route_check -n asic1 -m INFO -i 1000",
+        PRE: {
+            ASIC1: {
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.12/31" : { "ifname": "portchannel0" },
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
+                },
+            },
+        },
+        FRR_ROUTES: {
+            ASIC1: {
+                "0.0.0.0/0": [
+                    {
+                        "prefix": "0.0.0.0/0",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                        "selected": True,
+                        "offloaded": True,
+                    },
+                ],
+                "10.10.196.12/31": [
+                    {
+                        "prefix": "10.10.196.12/31",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                        "selected": True,
+                    },
+                ],
+                "10.10.196.24/31": [
+                    {
+                        "protocol": "connected",
+                        "selected": True,
+                    },
+                ],
+            },
+        },
+        RESULT: {
+            ASIC1: {
+                "missed_FRR_routes": [
+                    {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp", "selected": True}
+                ],
+            },
+        },
+        RET: -1,
+    },
+    "22": {
+        DESCR: "basic good one on single asic, bgp disabled",
+        MULTI_ASIC: False,
+        NAMESPACE: [''],
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            DEFAULTNS: {
+                CONFIG_DB: {
+                    DEVICE_METADATA: {
+                        LOCALHOST: {
+                        }
+                    },
+                    FEATURE_TABLE: {
+                        "bgp": {
+                            "state": "disabled"
+                        }
+                    }
+                },
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.12/31" : { "ifname": "portchannel0" },
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
+                },
+            },
+        },
+        FRR_ROUTES: {
+            DEFAULTNS: {
+                "0.0.0.0/0": [
+                    {
+                        "prefix": "0.0.0.0/0",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                        "offloaded": "true",
+                    },
+                ],
+                "10.10.196.12/31": [
+                    {
+                        "prefix": "10.10.196.12/31",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                    },
+                ],
+                "10.10.196.24/31": [
+                    {
+                        "protocol": "connected",
+                    },
+                ],
+            },
+        },
+    },
+    "23": {
+        DESCR: "basic good one on multi-asic, bgp disabled",
+        MULTI_ASIC: True,
+        NAMESPACE: ['asic0'],
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            ASIC0: {
+                CONFIG_DB: {
+                    DEVICE_METADATA: {
+                        LOCALHOST: {
+                        }
+                    },
+                    FEATURE_TABLE: {
+                        "bgp": {
+                            "state": "disabled"
+                        }
+                    }
+                },
+                APPL_DB: {
+                    ROUTE_TABLE: {
+                        "0.0.0.0/0" : { "ifname": "portchannel0" },
+                        "10.10.196.12/31" : { "ifname": "portchannel0" },
+                    },
+                    INTF_TABLE: {
+                        "PortChannel1013:10.10.196.24/31": {},
+                        "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                        "PortChannel1024": {}
+                    }
+                },
+                ASIC_DB: {
+                    RT_ENTRY_TABLE: {
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                        RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                    }
+                },
+            },
+        },
+        FRR_ROUTES: {
+            ASIC0: {
+                "0.0.0.0/0": [
+                    {
+                        "prefix": "0.0.0.0/0",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                        "offloaded": "true",
+                    },
+                ],
+                "10.10.196.12/31": [
+                    {
+                        "prefix": "10.10.196.12/31",
+                        "vrfName": "default",
+                        "protocol": "bgp",
+                    },
+                ],
+                "10.10.196.24/31": [
+                    {
+                        "protocol": "connected",
+                    },
+                ],
+            },
+        },
     },
 }
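[Reviewer note] All of the new and rewritten cases above follow one shape: with multi-asic support, PRE, UPD, FRR_ROUTES, and RESULT each gained one extra nesting level keyed by namespace. A minimal illustrative entry (not a case from the file) summarizing that shape:

    EXAMPLE_CASE = {
        DESCR: "shape of a namespace-keyed case",
        MULTI_ASIC: True,                   # fed to the mocked multi_asic.is_multi_asic()
        NAMESPACE: [ASIC0, ASIC1],          # returned by the mocked get_namespace_list()
        ARGS: "route_check -n asic0",       # '-n' restricts the check to one namespace
        PRE: {ASIC0: {APPL_DB: {ROUTE_TABLE: {}}}},  # DB snapshots, keyed by namespace first
        RESULT: {ASIC0: {}},                # expected mismatches, also keyed by namespace
        RET: 0,
    }
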
"default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + }, }, } diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 37a025a35c9..51a5a1f23e3 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -304,6 +304,7 @@ test_qsfp_dd_status_output = """\ Ethernet44: + CMIS State (SW): READY Tx fault flag on media lane 1: False Tx fault flag on media lane 2: False Tx fault flag on media lane 3: False @@ -599,6 +600,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A @@ -689,6 +696,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A @@ -779,6 +792,12 @@ 100GAUI-2 C2M (Annex 135G) - Host Assign (0x55) - 400ZR, DWDM, amplified - Media Assign (0x1) CMIS Rev: 4.1 Connector: LC + E1 Active Firmware: X.X + E1 Inactive Firmware: Y.Y + E1 Server Firmware: A.B.C.D + E2 Active Firmware: X.X + E2 Inactive Firmware: Y.Y + E2 Server Firmware: A.B.C.D Encoding: N/A Extended Identifier: Power Class 8 (20.0W Max) Extended RateSelect Compliance: N/A @@ -884,6 +903,28 @@ def test_sfp_presence(self): assert result.exit_code == 0 assert result.output == expected + def test_sfp_dpc_ports(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"]) + assert result.exit_code == 0 + assert "Ethernet24" not in result.output + def test_sfp_eeprom_with_dom(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0", "-d"]) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 63814f31c5a..523848ec453 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -151,8 +151,6 @@ def test_format_dict_value_to_string(self): 'specification_compliance': "sm_media_interface", 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no', 'Voltage_support': 'no', 'Temp_support': 'no'}", 'nominal_bit_rate': '0', - 'active_firmware': '0.1', - 'inactive_firmware': '0.0', 'hardware_rev': '0.0', 'media_interface_code': '400ZR, DWDM, amplified', 'host_electrical_interface': '400GAUI-8 C2M (Annex 120E)', @@ -184,7 +182,6 @@ def 
test_format_dict_value_to_string(self): " Active App Selection Host Lane 6: 1\n" " Active App Selection Host Lane 7: 1\n" " Active App Selection Host Lane 8: 1\n" - " Active Firmware Version: 0.1\n" " Application Advertisement: 400G CR8 - Host Assign (0x1) - Copper cable - Media Assign (0x2)\n" " 200GBASE-CR4 (Clause 136) - Host Assign (Unknown) - Unknown - Media Assign (Unknown)\n" " CMIS Revision: 5.0\n" @@ -197,7 +194,6 @@ def test_format_dict_value_to_string(self): " Host Lane Assignment Options: 1\n" " Host Lane Count: 8\n" " Identifier: QSFP-DD Double Density 8X Pluggable Transceiver\n" - " Inactive Firmware Version: 0.0\n" " Length Cable Assembly(m): 0\n" " Media Interface Code: 400ZR, DWDM, amplified\n" " Media Interface Technology: C-band tunable laser\n" @@ -673,6 +669,96 @@ def test_show_eeprom_hexdump_read_eeprom_not_implemented(self, mock_chassis): expected_output = "Sfp.read_eeprom() is currently not implemented for this platform\n" assert result.output == expected_output + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.isinstance', MagicMock(return_value=True)) + def test_show_eeprom_hexdump_processing_hex_page_number(self, mock_chassis): + lower_page_bytearray = bytearray([13, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + upper_page0_bytearray = bytearray([13, 0, 35, 8, 0, 0, 0, 65, 128, 128, 245, 0, 0, 0, 0, 0, 0, 0, 1, 160, 77, 111, 108, 101, 120, 32, 73, 110, 99, 46, 32, 32, 32, 32, 32, 32, 7, 0, 9, 58, 49, 49, 49, 48, 52, 48, 49, 48, 53, 52, 32, 32, 32, 32, 32, 32, 32, 32, 3, 4, 0, 0, 70, 196, 0, 0, 0, 0, 54, 49, 49, 48, 51, 48, 57, 50, 57, 32, 32, 32, 32, 32, 32, 32, 49, 54, 48, 52, 49, 57, 32, 32, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + page10_expected_output = '''EEPROM hexdump for port Ethernet0 page 10h + Lower page 0h + 00000000 0d 00 06 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000010 00 00 00 00 00 00 01 81 00 00 00 00 00 00 00 00 |................| + 00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 0h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. 
| + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 10h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. | + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + +''' + page11_expected_output = '''EEPROM hexdump for port Ethernet0 page 11h + Lower page 0h + 00000000 0d 00 06 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000010 00 00 00 00 00 00 01 81 00 00 00 00 00 00 00 00 |................| + 00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 00000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 0h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. | + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + + Upper page 11h + 00000080 0d 00 23 08 00 00 00 41 80 80 f5 00 00 00 00 00 |..#....A........| + 00000090 00 00 01 a0 4d 6f 6c 65 78 20 49 6e 63 2e 20 20 |....Molex Inc. 
| + 000000a0 20 20 20 20 07 00 09 3a 31 31 31 30 34 30 31 30 | ...:11104010| + 000000b0 35 34 20 20 20 20 20 20 20 20 03 04 00 00 46 c4 |54 ....F.| + 000000c0 00 00 00 00 36 31 31 30 33 30 39 32 39 20 20 20 |....611030929 | + 000000d0 20 20 20 20 31 36 30 34 31 39 20 20 00 00 00 24 | 160419 ...$| + 000000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + 000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| + +''' + def side_effect(offset, num_bytes): + if offset == 0: + return lower_page_bytearray + else: + return upper_page0_bytearray + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.read_eeprom = MagicMock(side_effect=side_effect) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom-hexdump'], ["-p", "Ethernet0", "-n", "10"]) + assert result.exit_code == 0 + assert result.output == page10_expected_output + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom-hexdump'], ["-p", "Ethernet0", "-n", "11"]) + assert result.exit_code == 0 + assert result.output == page11_expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) @@ -1113,7 +1199,7 @@ def test_firmware_commit_cli(self): def test_update_firmware_info_to_state_db(self, mock_chassis): mock_sfp = MagicMock() mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) - mock_sfp.get_transceiver_info_firmware_versions.return_value = ['a.b.c', 'd.e.f'] + mock_sfp.get_transceiver_info_firmware_versions.return_value = {'active_firmware' : 'a.b.c', 'inactive_firmware' : 'd.e.f'} sfputil.update_firmware_info_to_state_db("Ethernet0") @@ -1412,3 +1498,15 @@ def test_target_firmware(self, mock_chassis): result = runner.invoke(sfputil.cli.commands['firmware'].commands['target'], ["Ethernet0", "1"]) assert result.output == 'Target Mode set failed!\n' assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.multi_asic.is_multi_asic') + @patch('sfputil.main.platform_sfputil', MagicMock()) + @patch('sfputil.main.device_info.get_paths_to_platform_and_hwsku_dirs', + MagicMock(return_value=(None, None))) + @patch('sfputil.main.device_info.get_path_to_port_config_file', MagicMock(return_value=(''))) + def test_load_port_config(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True + assert sfputil.load_port_config() == True + + mock_is_multi_asic.return_value = False + assert sfputil.load_port_config() == True diff --git a/tests/show_test.py b/tests/show_test.py index 5b55c15896b..4cd29ac45e5 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -2,8 +2,10 @@ import sys import click import pytest +import importlib import subprocess import show.main as show +import utilities_common.bgp_util as bgp_util from unittest import mock from click.testing import CliRunner from utilities_common import constants @@ -32,6 +34,12 @@ class TestShowRunAllCommands(object): def setup_class(cls): print("SETUP") os.environ["UTILITIES_UNIT_TESTING"] = "1" + cls._old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock.MagicMock( + return_value=cls.mock_run_bgp_command()) + + def mock_run_bgp_command(): + return "" def test_show_runningconfiguration_all_json_loads_failure(self): def get_cmd_output_side_effect(*args, **kwargs): @@ -55,16 +63,63 @@ def 
diff --git a/tests/show_test.py b/tests/show_test.py
index 5b55c15896b..4cd29ac45e5 100644
--- a/tests/show_test.py
+++ b/tests/show_test.py
@@ -2,8 +2,10 @@
 import sys
 import click
 import pytest
+import importlib
 import subprocess
 import show.main as show
+import utilities_common.bgp_util as bgp_util
 from unittest import mock
 from click.testing import CliRunner
 from utilities_common import constants
@@ -32,6 +34,12 @@ class TestShowRunAllCommands(object):
     def setup_class(cls):
         print("SETUP")
         os.environ["UTILITIES_UNIT_TESTING"] = "1"
+        cls._old_run_bgp_command = bgp_util.run_bgp_command
+        bgp_util.run_bgp_command = mock.MagicMock(
+            return_value=cls.mock_run_bgp_command())
+
+    def mock_run_bgp_command():
+        return ""
 
     def test_show_runningconfiguration_all_json_loads_failure(self):
         def get_cmd_output_side_effect(*args, **kwargs):
@@ -55,16 +63,63 @@ def get_cmd_output_side_effect(*args, **kwargs):
         with mock.patch('show.main.get_cmd_output',
                         mock.MagicMock(side_effect=get_cmd_output_side_effect)) as mock_get_cmd_output:
             result = CliRunner().invoke(show.cli.commands['runningconfiguration'].commands['all'], [])
-        assert mock_get_cmd_output.call_count == 2
+        assert result.exit_code == 0
+        assert mock_get_cmd_output.call_count == 1
+        assert mock_get_cmd_output.call_args_list == [
+            call(['sonic-cfggen', '-d', '--print-data'])]
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        bgp_util.run_bgp_command = cls._old_run_bgp_command
+        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ["UTILITIES_UNIT_TESTING"] = "0"
+
+
+class TestShowRunAllCommandsMasic(object):
+    @classmethod
+    def setup_class(cls):
+        print("SETUP")
+        os.environ['UTILITIES_UNIT_TESTING'] = "2"
+        os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic"
+        cls._old_run_bgp_command = bgp_util.run_bgp_command
+        bgp_util.run_bgp_command = mock.MagicMock(
+            return_value=cls.mock_run_bgp_command())
+        # change to multi asic config
+        from .mock_tables import dbconnector
+        from .mock_tables import mock_multi_asic
+        importlib.reload(mock_multi_asic)
+        dbconnector.load_namespace_config()
+
+    def mock_run_bgp_command():
+        return ""
+
+    def test_show_runningconfiguration_all_masic(self):
+        def get_cmd_output_side_effect(*args, **kwargs):
+            return "{}", 0
+        with mock.patch('show.main.get_cmd_output',
+                        mock.MagicMock(side_effect=get_cmd_output_side_effect)) as mock_get_cmd_output:
+            result = CliRunner().invoke(show.cli.commands['runningconfiguration'].commands['all'], [])
+        assert result.exit_code == 0
+        assert mock_get_cmd_output.call_count == 3
         assert mock_get_cmd_output.call_args_list == [
             call(['sonic-cfggen', '-d', '--print-data']),
-            call(['rvtysh', '-c', 'show running-config'])]
+            call(['sonic-cfggen', '-d', '--print-data', '-n', 'asic0']),
+            call(['sonic-cfggen', '-d', '--print-data', '-n', 'asic1'])]
 
     @classmethod
     def teardown_class(cls):
         print("TEARDOWN")
+        bgp_util.run_bgp_command = cls._old_run_bgp_command
         os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
         os.environ["UTILITIES_UNIT_TESTING"] = "0"
+        os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = ""
+        # change back to single asic config
+        from .mock_tables import dbconnector
+        from .mock_tables import mock_single_asic
+        importlib.reload(mock_single_asic)
+        dbconnector.load_namespace_config()
+
 
 @patch('show.main.run_command')
 @pytest.mark.parametrize(
diff --git a/tests/syslog_multi_asic_test.py b/tests/syslog_multi_asic_test.py
new file mode 100644
index 00000000000..7933edcd669
--- /dev/null
+++ b/tests/syslog_multi_asic_test.py
@@ -0,0 +1,281 @@
+import mock
+import pytest
+from click.testing import CliRunner
+from importlib import reload
+from utilities_common.db import Db
+
+show_all_config = """SERVICE INTERVAL BURST
+--------- ---------- -------
+database 200 20000
+pmon 100 10000
+
+
+Namespace asic0:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+bgp 111 33333
+database 222 22222
+
+
+Namespace asic1:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+bgp 444 44444
+database 555 55555
+"""
+
+show_global_ns_config = """SERVICE INTERVAL BURST
+--------- ---------- -------
+database 200 20000
+pmon 100 10000
+"""
+
+show_asic0_ns_config = """Namespace asic0:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+bgp 111 33333
+database 222 22222
+"""
+
+show_all_ns_database_config = """SERVICE INTERVAL BURST
+--------- ---------- -------
+database 200 20000
+
+
+Namespace asic0:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+database 222 22222
+
+
+Namespace asic1:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+database 555 55555
+"""
+
+show_global_ns_database_config = """SERVICE INTERVAL BURST
+--------- ---------- -------
+database 200 20000
+"""
+
+show_asic0_ns_database_config = """Namespace asic0:
+SERVICE INTERVAL BURST
+--------- ---------- -------
+database 222 22222
+"""
+
+
+@pytest.fixture(scope='module')
+def setup_cmd_module():
+    # Mock to multi ASIC
+    from .mock_tables import mock_multi_asic
+    from .mock_tables import dbconnector
+    reload(mock_multi_asic)
+    dbconnector.load_namespace_config()
+
+    import show.main as show
+    import config.main as config
+
+    # Refresh syslog module for show and config
+    import show.syslog as show_syslog
+    reload(show_syslog)
+    show.cli.add_command(show_syslog.syslog)
+
+    import config.syslog as config_syslog
+    reload(config_syslog)
+    config.config.add_command(config_syslog.syslog)
+
+    yield show, config
+
+    # Mock back to single ASIC
+    from .mock_tables import mock_single_asic
+    reload(mock_single_asic)
+
+    # Refresh syslog module for show and config
+    reload(show_syslog)
+    show.cli.add_command(show_syslog.syslog)
+
+    reload(config_syslog)
+    config.config.add_command(config_syslog.syslog)
+
+
+class TestSyslogRateLimitMultiAsic:
+    def test_show_rate_limit_container(self, setup_cmd_module):
+        show, _ = setup_cmd_module
+
+        runner = CliRunner()
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"],
+            []
+        )
+
+        assert result.output == show_all_config
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"], ["-n", "default"]
+        )
+
+        assert result.output == show_global_ns_config
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"], ["-n", "asic0"]
+        )
+
+        assert result.output == show_asic0_ns_config
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"], ["database"]
+        )
+
+        assert result.output == show_all_ns_database_config
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"], ["database", "-n", "default"]
+        )
+
+        assert result.output == show_global_ns_database_config
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            show.cli.commands["syslog"].commands["rate-limit-container"], ["database", "-n", "asic0"]
+        )
+
+        assert result.output == show_asic0_ns_database_config
+        assert result.exit_code == 0
+
+    def test_config_rate_limit_container(self, setup_cmd_module):
+        _, config = setup_cmd_module
+
+        runner = CliRunner()
+        db = Db()
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-container"],
+            ["database", "--interval", 1, "--burst", 100], obj=db
+        )
+        assert result.exit_code == 0
+        for cfg_db in db.cfgdb_clients.values():
+            data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'database')
+            assert data['rate_limit_burst'] == '100'
+            assert data['rate_limit_interval'] == '1'
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-container"],
+            ["bgp", "--interval", 1, "--burst", 100], obj=db
+        )
+        assert result.exit_code == 0
+        for namespace, cfg_db in db.cfgdb_clients.items():
+            if namespace != '':
+                data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'bgp')
+                assert data['rate_limit_burst'] == '100'
+                assert data['rate_limit_interval'] == '1'
+            else:
+                table = cfg_db.get_table('SYSLOG_CONFIG_FEATURE')
+                assert 'bgp' not in table
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-container"],
+            ["pmon", "--interval", 1, "--burst", 100], obj=db
+        )
+        assert result.exit_code == 0
+        for namespace, cfg_db in db.cfgdb_clients.items():
+            if namespace == '':
+                data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'pmon')
+                assert data['rate_limit_burst'] == '100'
+                assert data['rate_limit_interval'] == '1'
+            else:
+                table = cfg_db.get_table('SYSLOG_CONFIG_FEATURE')
+                assert 'pmon' not in table
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-container"],
+            ["pmon", "--interval", 2, "--burst", 200, "-n", "default"], obj=db
+        )
+        assert result.exit_code == 0
+        cfg_db = db.cfgdb_clients['']
+        data = cfg_db.get_entry('SYSLOG_CONFIG_FEATURE', 'pmon')
+        assert data['rate_limit_burst'] == '200'
+        assert data['rate_limit_interval'] == '2'
+
+    @mock.patch('config.syslog.clicommon.run_command', mock.MagicMock(return_value=('', 0)))
+    def test_enable_syslog_rate_limit_feature(self, setup_cmd_module):
+        _, config = setup_cmd_module
+
+        runner = CliRunner()
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], []
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'],
+            ['-n', 'default']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'],
+            ['-n', 'asic0']
+        )
+
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'], ['database']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'],
+            ['database', '-n', 'default']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['enable'],
+            ['database', '-n', 'asic0']
+        )
+        assert result.exit_code == 0
+
+    @mock.patch('config.syslog.clicommon.run_command', mock.MagicMock(return_value=('', 0)))
+    def test_disable_syslog_rate_limit_feature(self, setup_cmd_module):
+        _, config = setup_cmd_module
+
+        runner = CliRunner()
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], []
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'],
+            ['-n', 'default']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'],
+            ['-n', 'asic0']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'], ['database']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'],
+            ['database', '-n', 'default']
+        )
+        assert result.exit_code == 0
+
+        result = runner.invoke(
+            config.config.commands["syslog"].commands["rate-limit-feature"].commands['disable'],
+            ['database', '-n', 'asic0']
+        )
+        assert result.exit_code == 0
diff --git a/tests/syslog_test.py b/tests/syslog_test.py
index 44915b6d369..c1cbee11273 100644
--- a/tests/syslog_test.py
+++ b/tests/syslog_test.py
@@ -484,4 +484,3 @@ def side_effect(*args, **kwargs):
         config.config.commands["syslog"].commands["rate-limit-feature"].commands["disable"], obj=db
     )
     assert result.exit_code == SUCCESS
-
diff --git a/tests/vlan_test.py b/tests/vlan_test.py
index 436e309281d..5a84737b2ac 100644
--- a/tests/vlan_test.py
+++ b/tests/vlan_test.py
@@ -368,13 +368,17 @@ def test_config_vlan_del_vlan(self, mock_restart_dhcp_relay_service):
         assert "Error: Vlan1000 can not be removed. First remove IP addresses assigned to this VLAN" in result.output
 
         # remove vlan IP`s
-        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "192.168.0.1/21"], obj=obj)
-        print(result.exit_code, result.output)
-        assert result.exit_code != 0
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "192.168.0.1/21"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
 
-        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "fc02:1000::1/64"], obj=obj)
-        print(result.exit_code, result.output)
-        assert result.exit_code != 0
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], ["Vlan1000", "fc02:1000::1/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
 
         # del vlan with IP
         result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1000"], obj=db)
@@ -778,15 +782,19 @@ def test_config_vlan_del_dhcp_relay_restart(self):
         obj = {"config_db": db.cfgdb}
 
         # remove vlan IP`s
-        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
-                               ["Vlan1000", "192.168.0.1/21"], obj=obj)
-        print(result.exit_code, result.output)
-        assert result.exit_code != 0
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Vlan1000", "192.168.0.1/21"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
 
-        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
-                               ["Vlan1000", "fc02:1000::1/64"], obj=obj)
-        print(result.exit_code, result.output)
-        assert result.exit_code != 0
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Vlan1000", "fc02:1000::1/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
 
         # remove vlan members
         vlan_member = db.cfgdb.get_table("VLAN_MEMBER")
diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py
index 64054662e32..65f9a594963 100644
--- a/utilities_common/bgp_util.py
+++ b/utilities_common/bgp_util.py
@@ -38,6 +38,15 @@ def is_bgp_neigh_present(neighbor_ip, namespace=multi_asic.DEFAULT_NAMESPACE):
     return False
 
 
+def is_bgp_feature_state_enabled(namespace=multi_asic.DEFAULT_NAMESPACE):
+    config_db = multi_asic.connect_config_db_for_ns(namespace)
+    bgp = config_db.get_entry("FEATURE", "bgp")
+    if "state" in bgp:
+        if bgp["state"] == "enabled":
+            return True
+    return False
+
+
 def is_ipv4_address(ip_address):
     """
     Checks if given ip is ipv4
diff --git a/utilities_common/platform_sfputil_helper.py b/utilities_common/platform_sfputil_helper.py
index 0de83f05b02..741b5334ecc 100644
--- a/utilities_common/platform_sfputil_helper.py
+++ b/utilities_common/platform_sfputil_helper.py
@@ -121,10 +121,10 @@ def is_rj45_port(port_name):
         if not platform_sfp_base:
             import sonic_platform_base
             platform_sfp_base = sonic_platform_base.sfp_base.SfpBase
-    except ModuleNotFoundError as e:
+    except (ModuleNotFoundError, FileNotFoundError) as e:
         # This method is referenced by intfutil which is called on vs image
-        # However, there is no platform API supported on vs image
-        # So False is returned in such case
+        # sonic_platform API support was added for the vs image (required for chassis); it expects a metadata file
+        # that won't be available on vs pizzabox DUTs, so False is returned on either ModuleNotFoundError or FileNotFoundError
         return False
 
     if platform_chassis and platform_sfp_base:
diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py
index 09a96ca2ea1..e38e764c9c1 100644
--- a/utilities_common/sfp_helper.py
+++ b/utilities_common/sfp_helper.py
@@ -38,7 +38,13 @@
     'supported_max_tx_power': 'Supported Max TX Power',
     'supported_min_tx_power': 'Supported Min TX Power',
     'supported_max_laser_freq': 'Supported Max Laser Frequency',
-    'supported_min_laser_freq': 'Supported Min Laser Frequency'
+    'supported_min_laser_freq': 'Supported Min Laser Frequency',
+    'e1_active_firmware': 'E1 Active Firmware',
+    'e1_inactive_firmware': 'E1 Inactive Firmware',
+    'e1_server_firmware': 'E1 Server Firmware',
+    'e2_active_firmware': 'E2 Active Firmware',
+    'e2_inactive_firmware': 'E2 Inactive Firmware',
+    'e2_server_firmware': 'E2 Server Firmware'
 }
 
 CMIS_DATA_MAP = {**QSFP_DATA_MAP, **QSFP_CMIS_DELTA_DATA_MAP}
@@ -47,6 +53,7 @@
 # For non-CMIS, only first 1 or 4 lanes are applicable.
 # For CMIS, all 8 lanes are applicable.
 QSFP_STATUS_MAP = {
+    'cmis_state': 'CMIS State (SW)',
     'txfault1': 'Tx fault flag on media lane 1',
     'txfault2': 'Tx fault flag on media lane 2',
     'txfault3': 'Tx fault flag on media lane 3',