Skip to content

Commit

Permalink
Merge branch 'sonic-net:master' into dev-cli-sessions
Browse files Browse the repository at this point in the history
  • Loading branch information
i-davydenko authored Apr 30, 2024
2 parents 3004835 + 099d40c commit a5f9308
Show file tree
Hide file tree
Showing 91 changed files with 4,470 additions and 1,086 deletions.
35 changes: 35 additions & 0 deletions .azure-pipelines/pre-commit-check.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
steps:
- checkout: self
clean: true
displayName: 'checkout sonic-utilities repo'

- script: |
set -x
sudo pip install pre-commit
pre-commit install-hooks
displayName: 'Prepare pre-commit check'

- script: |
# Run pre-commit check and capture the output
out=`pre-commit run --color never --from-ref HEAD^ --to-ref HEAD 2>&1`
RC=$?
if [[ $RC -ne 0 ]]; then
echo -e "The [pre-commit](http://pre-commit.com/) check detected issues in the files touched by this pull request.\n\
The pre-commit check is a mandatory check, please fix detected issues.\n\
\n\
To run the pre-commit checks locally, you can follow below steps:\n\
1. Ensure that default python is python3.\n\
2. Ensure that the 'pre-commit' package is installed:\n\
sudo pip install pre-commit\n\
3. Go to repository root folder\n\
4. Install the pre-commit hooks:\n\
pre-commit install\n\
5. Use pre-commit to check staged file:\n\
pre-commit\n\
6. Alternatively, you can check committed files using:\n\
pre-commit run --from-ref <commit_id> --to-ref <commit_id>\n"
fi
echo "Pre-commit check results:"
echo "$out"
exit $RC
displayName: 'Run pre-commit check'
2 changes: 1 addition & 1 deletion .github/workflows/semgrep.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,4 @@ jobs:
- uses: actions/checkout@v3
- run: semgrep ci
env:
SEMGREP_RULES: p/default
SEMGREP_RULES: "p/default r/python.lang.security.audit.dangerous-system-call-audit.dangerous-system-call-audit"
9 changes: 9 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
hooks:
- id: flake8
entry: bash -c 'git diff HEAD^ HEAD -U0 -- "$@" | flake8 --diff "$@"' --
args: ["--max-line-length=120"]
28 changes: 17 additions & 11 deletions acl_loader/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -413,17 +413,17 @@ def parse_acl_json(filename):
raise AclLoaderException("Invalid input file %s" % filename)
return yang_acl

def load_rules_from_file(self, filename):
def load_rules_from_file(self, filename, skip_action_validation=False):
"""
Load file with ACL rules configuration in openconfig ACL format. Convert rules
to Config DB schema.
:param filename: File in openconfig ACL format
:return:
"""
self.yang_acl = AclLoader.parse_acl_json(filename)
self.convert_rules()
self.convert_rules(skip_action_validation)

def convert_action(self, table_name, rule_idx, rule):
def convert_action(self, table_name, rule_idx, rule, skip_validation=False):
rule_props = {}

if rule.actions.config.forwarding_action == "ACCEPT":
Expand Down Expand Up @@ -452,13 +452,13 @@ def convert_action(self, table_name, rule_idx, rule):
raise AclLoaderException("Unknown rule action {} in table {}, rule {}".format(
rule.actions.config.forwarding_action, table_name, rule_idx))

if not self.validate_actions(table_name, rule_props):
if not self.validate_actions(table_name, rule_props, skip_validation):
raise AclLoaderException("Rule action {} is not supported in table {}, rule {}".format(
rule.actions.config.forwarding_action, table_name, rule_idx))

return rule_props

def validate_actions(self, table_name, action_props):
def validate_actions(self, table_name, action_props, skip_validation=False):
if self.is_table_control_plane(table_name):
return True

Expand All @@ -481,6 +481,11 @@ def validate_actions(self, table_name, action_props):
else:
aclcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.ACL_STAGE_CAPABILITY_TABLE, stage.upper()))
switchcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE))
# In the load_minigraph path, it's possible that the STATE_DB entries haven't been populated yet
# because orchagent is stopped before loading acl.json. So we skip the validation if any table is empty
if skip_validation and (not aclcapability or not switchcapability):
warning("Skipped action validation as capability table is not present in STATE_DB")
return True
for action_key in dict(action_props):
action_list_key = self.ACL_ACTIONS_CAPABILITY_FIELD
if action_list_key not in aclcapability:
Expand Down Expand Up @@ -699,7 +704,7 @@ def validate_rule_fields(self, rule_props):
if ("ICMPV6_TYPE" in rule_props or "ICMPV6_CODE" in rule_props) and protocol != 58:
raise AclLoaderException("IP_PROTOCOL={} is not ICMPV6, but ICMPV6 fields were provided".format(protocol))

def convert_rule_to_db_schema(self, table_name, rule):
def convert_rule_to_db_schema(self, table_name, rule, skip_action_validation=False):
"""
Convert rules format from openconfig ACL to Config DB schema
:param table_name: ACL table name to which rule belong
Expand Down Expand Up @@ -729,7 +734,7 @@ def convert_rule_to_db_schema(self, table_name, rule):
elif self.is_table_l3(table_name):
rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"])

deep_update(rule_props, self.convert_action(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_action(table_name, rule_idx, rule, skip_action_validation))
deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_icmp(table_name, rule_idx, rule))
Expand Down Expand Up @@ -761,7 +766,7 @@ def deny_rule(self, table_name):
return {} # Don't add default deny rule if table is not [L3, L3V6]
return rule_data

def convert_rules(self):
def convert_rules(self, skip_aciton_validation=False):
"""
Convert rules in openconfig ACL format to Config DB schema
:return:
Expand All @@ -780,7 +785,7 @@ def convert_rules(self):
for acl_entry_name in acl_set.acl_entries.acl_entry:
acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name]
try:
rule = self.convert_rule_to_db_schema(table_name, acl_entry)
rule = self.convert_rule_to_db_schema(table_name, acl_entry, skip_aciton_validation)
deep_update(self.rules_info, rule)
except AclLoaderException as ex:
error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex))
Expand Down Expand Up @@ -1149,8 +1154,9 @@ def update(ctx):
@click.option('--session_name', type=click.STRING, required=False)
@click.option('--mirror_stage', type=click.Choice(["ingress", "egress"]), default="ingress")
@click.option('--max_priority', type=click.INT, required=False)
@click.option('--skip_action_validation', is_flag=True, default=False, help="Skip action validation")
@click.pass_context
def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
def full(ctx, filename, table_name, session_name, mirror_stage, max_priority, skip_action_validation):
"""
Full update of ACL rules configuration.
If a table_name is provided, the operation will be restricted in the specified table.
Expand All @@ -1168,7 +1174,7 @@ def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
if max_priority:
acl_loader.set_max_priority(max_priority)

acl_loader.load_rules_from_file(filename)
acl_loader.load_rules_from_file(filename, skip_action_validation)
acl_loader.full_update()


Expand Down
10 changes: 10 additions & 0 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,16 @@ variables:
value: $(Build.SourceBranchName)

stages:
- stage: Pretest
jobs:
- job: static_analysis
displayName: "Static Analysis"
timeoutInMinutes: 10
continueOnError: true
pool: ubuntu-20.04
steps:
- template: .azure-pipelines/pre-commit-check.yml

- stage: Build

jobs:
Expand Down
130 changes: 121 additions & 9 deletions config/fabric.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@
import utilities_common.cli as clicommon
import utilities_common.multi_asic as multi_asic_util
from sonic_py_common import multi_asic
from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, APP_FABRIC_PORT_TABLE_NAME

FABRIC_PORT_STATUS_TABLE_PREFIX = APP_FABRIC_PORT_TABLE_NAME+"|"


#
# 'config fabric ...'
Expand Down Expand Up @@ -66,19 +69,13 @@ def isolate(portid, namespace):
#
@port.command()
@click.argument('portid', metavar='<portid>', required=True)
@click.option('-f', '--force', is_flag=True, default=False, help='Force to unisolate a link even if it is auto isolated.')
@multi_asic_util.multi_asic_click_option_namespace
def unisolate(portid, namespace):
def unisolate(portid, namespace, force):
"""FABRIC PORT unisolate <portid>"""

ctx = click.get_current_context()

if not portid.isdigit():
ctx.fail("Invalid portid")

n_asics = multi_asic.get_num_asics()
if n_asics > 1 and namespace is None:
ctx.fail('Must specify asic')

# Connect to config database
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
Expand All @@ -87,6 +84,37 @@ def unisolate(portid, namespace):
state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace)
state_db.connect(state_db.STATE_DB, False)

n_asics = multi_asic.get_num_asics()
if n_asics > 1 and namespace is None:
ctx.fail( 'Must specify asic' )

# If "all" is specified then unisolate all ports.
if portid == "all":
port_keys = state_db.keys(state_db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
for port_key in port_keys:
port_data = state_db.get_all(state_db.STATE_DB, port_key)
if "REMOTE_PORT" in port_data:
port_number = int( port_key.replace( "FABRIC_PORT_TABLE|PORT", "" ) )

# Make sure configuration data exists
portName = f'Fabric{port_number}'
portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName)
if not bool( portConfigData ):
ctx.fail( "Fabric monitor configuration data not present" )

# Update entry
config_db.mod_entry( "FABRIC_PORT", portName, {'isolateStatus': False} )
if force:
forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
forceShutCnt += 1
config_db.mod_entry( "FABRIC_PORT", portName,
{'forceUnisolateStatus': forceShutCnt})

return

if not portid.isdigit():
ctx.fail( "Invalid portid" )

# check if the port is actually in use
portName = f'PORT{portid}'
portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName)
Expand All @@ -102,6 +130,15 @@ def unisolate(portid, namespace):
# Update entry
config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': False})

if force:
forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
forceShutCnt += 1
config_db.mod_entry( "FABRIC_PORT", portName,
{'forceUnisolateStatus': forceShutCnt})

click.echo("Force unisolate the link.")
click.echo("It will clear all fabric link monitoring status for this link!")

#
# 'config fabric port monitor ...'
#
Expand Down Expand Up @@ -157,6 +194,39 @@ def error_threshold(crccells, rxcells, namespace):
config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
{'monErrThreshCrcCells': crccells, 'monErrThreshRxCells': rxcells})

def setFabricPortMonitorState(state, namespace, ctx):
    """Persist the fabric port monitor state into CONFIG_DB for one namespace.

    Fails the click context if the FABRIC_MONITOR table has not been
    provisioned, so the entry is only ever updated, never created here.
    """
    # Open CONFIG_DB for the requested (possibly default) namespace.
    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
    config_db.connect()

    # The monitor entry must already exist before we touch it.
    if not config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA"):
        ctx.fail("Fabric monitor configuration data not present")

    # Update only the monitor-state field of the existing entry.
    config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
                        {'monState': state})

#
# 'config fabric port monitor state <enable/disable>'
#
@monitor.command()
@click.argument('state', metavar='<state>', required=True)
@multi_asic_util.multi_asic_click_option_namespace
def state(state, namespace):
    """FABRIC PORT MONITOR STATE configuration tasks"""
    ctx = click.get_current_context()

    # On a multi-ASIC device with no explicit namespace, apply the
    # state to every namespace; otherwise apply it to the one given.
    if multi_asic.get_num_asics() > 1 and namespace is None:
        for ns in multi_asic.get_namespace_list():
            setFabricPortMonitorState(state, ns, ctx)
    else:
        setFabricPortMonitorState(state, namespace, ctx)

#
# 'config fabric port monitor poll ...'
#
Expand Down Expand Up @@ -245,3 +315,45 @@ def recovery(pollcount, namespace):
{"monPollThreshRecovery": pollcount})


#
# 'config fabric monitor ...'
#
@fabric.group(cls=clicommon.AbbreviationGroup, name='monitor')
def capacity_monitor():
    """FABRIC MONITOR configuration tasks"""
    # Pure click group container; subcommands attach below.
    pass

#
# 'config fabric monitor capacity...'
#
@capacity_monitor.group(cls=clicommon.AbbreviationGroup)
def capacity():
    """FABRIC MONITOR CAPACITY configuration tasks"""
    # Pure click group container; subcommands attach below.
    pass

#
# 'config fabric monitor capacity threshold <capacityThresh>'
#
@capacity.command()
@click.argument('capacitythreshold', metavar='<capacityThreshold>', required=True, type=int)
def threshold(capacitythreshold):
    """FABRIC CAPACITY MONITOR THRESHOLD configuration tasks

    Writes monCapacityThreshWarn into the FABRIC_MONITOR table of every
    namespace's CONFIG_DB. The value must lie in [5, 250].
    """
    ctx = click.get_current_context()

    if capacitythreshold < 5 or capacitythreshold > 250:
        ctx.fail("threshold must be in range 5...250")

    # Apply the threshold to every namespace (empty-string namespace on
    # single-ASIC platforms). The previous enumerate() index was unused.
    for namespace in multi_asic.get_namespace_list():
        # Connect to config database
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

        # Make sure configuration data exists; never create it from here.
        monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA")
        if not bool(monitorData):
            ctx.fail("Fabric monitor configuration data not present")

        # Update only the warning-threshold field of the existing entry.
        config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
                            {"monCapacityThreshWarn": capacitythreshold})
Loading

0 comments on commit a5f9308

Please sign in to comment.