Diffstat (limited to 'tools')
-rwxr-xr-x | tools/net/ynl/cli.py (renamed from tools/net/ynl/samples/cli.py)     |  17
-rw-r--r-- | tools/net/ynl/lib/__init__.py                                         |   7
-rw-r--r-- | tools/net/ynl/lib/nlspec.py                                           | 310
-rw-r--r-- | tools/net/ynl/lib/ynl.py (renamed from tools/net/ynl/samples/ynl.py)  | 192
-rwxr-xr-x | tools/net/ynl/ynl-gen-c.py                                            | 262
-rwxr-xr-x | tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh              |  68
-rwxr-xr-x | tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh          |  23
-rwxr-xr-x | tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh          |  27
-rwxr-xr-x | tools/testing/selftests/net/forwarding/lib.sh                         |  21
9 files changed, 568 insertions, 359 deletions
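
The first hunk below converts the CLI sample: it switches to python3, imports YnlFamily from the new lib package, adds a --no-schema switch, and replaces the getattr()-based dispatch with explicit do() and dump() calls. A minimal sketch of that resulting interface, run from tools/net/ynl/ so the lib package resolves; the spec path, op name and attribute name are illustrative, and the empty schema argument skips jsonschema validation exactly as --no-schema does:

    #!/usr/bin/env python3
    # Sketch of the refactored YnlFamily API (illustrative spec/op/attr names).
    from lib import YnlFamily

    # An empty schema string skips jsonschema validation, like --no-schema.
    ynl = YnlFamily('Documentation/netlink/specs/netdev.yaml', '')

    reply = ynl.do('dev-get', {'ifindex': 1})   # single request, decoded dict
    devices = ynl.dump('dev-get', {})           # sets NLM_F_DUMP, list of dicts
    print(reply)
    print(devices)

dump() differs from do() only in setting NLM_F_DUMP and always returning a list of decoded messages, while do() unwraps a single reply.
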
diff --git a/tools/net/ynl/samples/cli.py b/tools/net/ynl/cli.py index b27159c70710..db410b74d539 100755 --- a/tools/net/ynl/samples/cli.py +++ b/tools/net/ynl/cli.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # SPDX-License-Identifier: BSD-3-Clause import argparse @@ -6,13 +6,14 @@ import json import pprint import time -from ynl import YnlFamily +from lib import YnlFamily def main(): parser = argparse.ArgumentParser(description='YNL CLI sample') parser.add_argument('--spec', dest='spec', type=str, required=True) parser.add_argument('--schema', dest='schema', type=str) + parser.add_argument('--no-schema', action='store_true') parser.add_argument('--json', dest='json_text', type=str) parser.add_argument('--do', dest='do', type=str) parser.add_argument('--dump', dest='dump', type=str) @@ -20,6 +21,9 @@ def main(): parser.add_argument('--subscribe', dest='ntf', type=str) args = parser.parse_args() + if args.no_schema: + args.schema = '' + attrs = {} if args.json_text: attrs = json.loads(args.json_text) @@ -32,10 +36,11 @@ def main(): if args.sleep: time.sleep(args.sleep) - if args.do or args.dump: - method = getattr(ynl, args.do if args.do else args.dump) - - reply = method(attrs, dump=bool(args.dump)) + if args.do: + reply = ynl.do(args.do, attrs) + pprint.PrettyPrinter().pprint(reply) + if args.dump: + reply = ynl.dump(args.dump, attrs) pprint.PrettyPrinter().pprint(reply) if args.ntf: diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py new file mode 100644 index 000000000000..3c73f59eabab --- /dev/null +++ b/tools/net/ynl/lib/__init__.py @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause + +from .nlspec import SpecAttr, SpecAttrSet, SpecFamily, SpecOperation +from .ynl import YnlFamily + +__all__ = ["SpecAttr", "SpecAttrSet", "SpecFamily", "SpecOperation", + "YnlFamily"] diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py new file mode 100644 index 000000000000..e204679ad8b7 --- /dev/null +++ b/tools/net/ynl/lib/nlspec.py @@ -0,0 +1,310 @@ +# SPDX-License-Identifier: BSD-3-Clause + +import collections +import importlib +import os +import traceback +import yaml + + +# To be loaded dynamically as needed +jsonschema = None + + +class SpecElement: + """Netlink spec element. + + Abstract element of the Netlink spec. Implements the dictionary interface + for access to the raw spec. Supports iterative resolution of dependencies + across elements and class inheritance levels. The elements of the spec + may refer to each other, and although loops should be very rare, having + to maintain correct ordering of instantiation is painful, so the resolve() + method should be used to perform parts of init which require access to + other parts of the spec. 
+ + Attributes: + yaml raw spec as loaded from the spec file + family back reference to the full family + + name name of the entity as listed in the spec (optional) + ident_name name which can be safely used as identifier in code (optional) + """ + def __init__(self, family, yaml): + self.yaml = yaml + self.family = family + + if 'name' in self.yaml: + self.name = self.yaml['name'] + self.ident_name = self.name.replace('-', '_') + + self._super_resolved = False + family.add_unresolved(self) + + def __getitem__(self, key): + return self.yaml[key] + + def __contains__(self, key): + return key in self.yaml + + def get(self, key, default=None): + return self.yaml.get(key, default) + + def resolve_up(self, up): + if not self._super_resolved: + up.resolve() + self._super_resolved = True + + def resolve(self): + pass + + +class SpecAttr(SpecElement): + """ Single Netlink atttribute type + + Represents a single attribute type within an attr space. + + Attributes: + value numerical ID when serialized + attr_set Attribute Set containing this attr + """ + def __init__(self, family, attr_set, yaml, value): + super().__init__(family, yaml) + + self.value = value + self.attr_set = attr_set + self.is_multi = yaml.get('multi-attr', False) + + +class SpecAttrSet(SpecElement): + """ Netlink Attribute Set class. + + Represents a ID space of attributes within Netlink. + + Note that unlike other elements, which expose contents of the raw spec + via the dictionary interface Attribute Set exposes attributes by name. + + Attributes: + attrs ordered dict of all attributes (indexed by name) + attrs_by_val ordered dict of all attributes (indexed by value) + subset_of parent set if this is a subset, otherwise None + """ + def __init__(self, family, yaml): + super().__init__(family, yaml) + + self.subset_of = self.yaml.get('subset-of', None) + + self.attrs = collections.OrderedDict() + self.attrs_by_val = collections.OrderedDict() + + val = 0 + for elem in self.yaml['attributes']: + if 'value' in elem: + val = elem['value'] + + attr = self.new_attr(elem, val) + self.attrs[attr.name] = attr + self.attrs_by_val[attr.value] = attr + val += 1 + + def new_attr(self, elem, value): + return SpecAttr(self.family, self, elem, value) + + def __getitem__(self, key): + return self.attrs[key] + + def __contains__(self, key): + return key in self.attrs + + def __iter__(self): + yield from self.attrs + + def items(self): + return self.attrs.items() + + +class SpecOperation(SpecElement): + """Netlink Operation + + Information about a single Netlink operation. 
+ + Attributes: + value numerical ID when serialized, None if req/rsp values differ + + req_value numerical ID when serialized, user -> kernel + rsp_value numerical ID when serialized, user <- kernel + is_call bool, whether the operation is a call + is_async bool, whether the operation is a notification + is_resv bool, whether the operation does not exist (it's just a reserved ID) + attr_set attribute set name + + yaml raw spec as loaded from the spec file + """ + def __init__(self, family, yaml, req_value, rsp_value): + super().__init__(family, yaml) + + self.value = req_value if req_value == rsp_value else None + self.req_value = req_value + self.rsp_value = rsp_value + + self.is_call = 'do' in yaml or 'dump' in yaml + self.is_async = 'notify' in yaml or 'event' in yaml + self.is_resv = not self.is_async and not self.is_call + + # Added by resolve: + self.attr_set = None + delattr(self, "attr_set") + + def resolve(self): + self.resolve_up(super()) + + if 'attribute-set' in self.yaml: + attr_set_name = self.yaml['attribute-set'] + elif 'notify' in self.yaml: + msg = self.family.msgs[self.yaml['notify']] + attr_set_name = msg['attribute-set'] + elif self.is_resv: + attr_set_name = '' + else: + raise Exception(f"Can't resolve attribute set for op '{self.name}'") + if attr_set_name: + self.attr_set = self.family.attr_sets[attr_set_name] + + +class SpecFamily(SpecElement): + """ Netlink Family Spec class. + + Netlink family information loaded from a spec (e.g. in YAML). + Takes care of unfolding implicit information which can be skipped + in the spec itself for brevity. + + The class can be used like a dictionary to access the raw spec + elements but that's usually a bad idea. + + Attributes: + proto protocol type (e.g. genetlink) + + attr_sets dict of attribute sets + msgs dict of all messages (index by name) + msgs_by_value dict of all messages (indexed by name) + ops dict of all valid requests / responses + """ + def __init__(self, spec_path, schema_path=None): + with open(spec_path, "r") as stream: + spec = yaml.safe_load(stream) + + self._resolution_list = [] + + super().__init__(self, spec) + + self.proto = self.yaml.get('protocol', 'genetlink') + + if schema_path is None: + schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml' + if schema_path: + global jsonschema + + with open(schema_path, "r") as stream: + schema = yaml.safe_load(stream) + + if jsonschema is None: + jsonschema = importlib.import_module("jsonschema") + + jsonschema.validate(self.yaml, schema) + + self.attr_sets = collections.OrderedDict() + self.msgs = collections.OrderedDict() + self.req_by_value = collections.OrderedDict() + self.rsp_by_value = collections.OrderedDict() + self.ops = collections.OrderedDict() + + last_exception = None + while len(self._resolution_list) > 0: + resolved = [] + unresolved = self._resolution_list + self._resolution_list = [] + + for elem in unresolved: + try: + elem.resolve() + except (KeyError, AttributeError) as e: + self._resolution_list.append(elem) + last_exception = e + continue + + resolved.append(elem) + + if len(resolved) == 0: + traceback.print_exception(last_exception) + raise Exception("Could not resolve any spec element, infinite loop?") + + def new_attr_set(self, elem): + return SpecAttrSet(self, elem) + + def new_operation(self, elem, req_val, rsp_val): + return SpecOperation(self, elem, req_val, rsp_val) + + def add_unresolved(self, elem): + self._resolution_list.append(elem) + + def _dictify_ops_unified(self): + val = 0 + for elem in 
self.yaml['operations']['list']: + if 'value' in elem: + val = elem['value'] + + op = self.new_operation(elem, val, val) + val += 1 + + self.msgs[op.name] = op + + def _dictify_ops_directional(self): + req_val = rsp_val = 0 + for elem in self.yaml['operations']['list']: + if 'notify' in elem: + if 'value' in elem: + rsp_val = elem['value'] + req_val_next = req_val + rsp_val_next = rsp_val + 1 + req_val = None + elif 'do' in elem or 'dump' in elem: + mode = elem['do'] if 'do' in elem else elem['dump'] + + v = mode.get('request', {}).get('value', None) + if v: + req_val = v + v = mode.get('reply', {}).get('value', None) + if v: + rsp_val = v + + rsp_inc = 1 if 'reply' in mode else 0 + req_val_next = req_val + 1 + rsp_val_next = rsp_val + rsp_inc + else: + raise Exception("Can't parse directional ops") + + op = self.new_operation(elem, req_val, rsp_val) + req_val = req_val_next + rsp_val = rsp_val_next + + self.msgs[op.name] = op + + def resolve(self): + self.resolve_up(super()) + + for elem in self.yaml['attribute-sets']: + attr_set = self.new_attr_set(elem) + self.attr_sets[elem['name']] = attr_set + + msg_id_model = self.yaml['operations'].get('enum-model', 'unified') + if msg_id_model == 'unified': + self._dictify_ops_unified() + elif msg_id_model == 'directional': + self._dictify_ops_directional() + + for op in self.msgs.values(): + if op.req_value is not None: + self.req_by_value[op.req_value] = op + if op.rsp_value is not None: + self.rsp_by_value[op.rsp_value] = op + if not op.is_async and 'attribute-set' in op: + self.ops[op.name] = op diff --git a/tools/net/ynl/samples/ynl.py b/tools/net/ynl/lib/ynl.py index b71523d71d46..1c7411ee04dc 100644 --- a/tools/net/ynl/samples/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -1,13 +1,14 @@ # SPDX-License-Identifier: BSD-3-Clause import functools -import jsonschema import os import random import socket import struct import yaml +from .nlspec import SpecFamily + # # Generic Netlink code which should really be in some library, but I can't quickly find one. 
# @@ -74,6 +75,9 @@ class NlAttr: self.full_len = (self.payload_len + 3) & ~3 self.raw = raw[offset + 4:offset + self.payload_len] + def as_u8(self): + return struct.unpack("B", self.raw)[0] + def as_u16(self): return struct.unpack("H", self.raw)[0] @@ -158,8 +162,8 @@ class NlMsg: # We don't have the ability to parse nests yet, so only do global if 'miss-type' in self.extack and 'miss-nest' not in self.extack: miss_type = self.extack['miss-type'] - if len(attr_space.attr_list) > miss_type: - spec = attr_space.attr_list[miss_type] + if miss_type in attr_space.attrs_by_val: + spec = attr_space.attrs_by_val[miss_type] desc = spec['name'] if 'doc' in spec: desc += f" ({spec['doc']})" @@ -289,100 +293,31 @@ class GenlFamily: # -class YnlAttrSpace: - def __init__(self, family, yaml): - self.yaml = yaml - - self.attrs = dict() - self.name = self.yaml['name'] - self.subspace_of = self.yaml['subset-of'] if 'subspace-of' in self.yaml else None - - val = 0 - max_val = 0 - for elem in self.yaml['attributes']: - if 'value' in elem: - val = elem['value'] - else: - elem['value'] = val - if val > max_val: - max_val = val - val += 1 - - self.attrs[elem['name']] = elem - - self.attr_list = [None] * (max_val + 1) - for elem in self.yaml['attributes']: - self.attr_list[elem['value']] = elem - - def __getitem__(self, key): - return self.attrs[key] - - def __contains__(self, key): - return key in self.yaml - - def __iter__(self): - yield from self.attrs - - def items(self): - return self.attrs.items() - - -class YnlFamily: +class YnlFamily(SpecFamily): def __init__(self, def_path, schema=None): - self.include_raw = False + super().__init__(def_path, schema) - with open(def_path, "r") as stream: - self.yaml = yaml.safe_load(stream) - - if schema: - with open(schema, "r") as stream: - schema = yaml.safe_load(stream) - - jsonschema.validate(self.yaml, schema) + self.include_raw = False self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC) self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1) self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1) - self._ops = dict() - self._spaces = dict() self._types = dict() - for elem in self.yaml['attribute-sets']: - self._spaces[elem['name']] = YnlAttrSpace(self, elem) - - for elem in self.yaml['definitions']: + for elem in self.yaml.get('definitions', []): self._types[elem['name']] = elem - async_separation = 'async-prefix' in self.yaml['operations'] self.async_msg_ids = set() self.async_msg_queue = [] - val = 0 - max_val = 0 - for elem in self.yaml['operations']['list']: - if not (async_separation and ('notify' in elem or 'event' in elem)): - if 'value' in elem: - val = elem['value'] - else: - elem['value'] = val - val += 1 - max_val = max(val, max_val) - - if 'notify' in elem or 'event' in elem: - self.async_msg_ids.add(elem['value']) - - self._ops[elem['name']] = elem - - op_name = elem['name'].replace('-', '_') - bound_f = functools.partial(self._op, elem['name']) - setattr(self, op_name, bound_f) + for msg in self.msgs.values(): + if msg.is_async: + self.async_msg_ids.add(msg.rsp_value) - self._op_array = [None] * max_val - for _, op in self._ops.items(): - self._op_array[op['value']] = op - if 'notify' in op: - op['attribute-set'] = self._ops[op['notify']]['attribute-set'] + for op_name, op in self.ops.items(): + bound_f = functools.partial(self._op, op_name) + setattr(self, op.ident_name, bound_f) self.family = GenlFamily(self.yaml['name']) @@ -395,13 +330,15 @@ class YnlFamily: 
self.family.genl_family['mcast'][mcast_name]) def _add_attr(self, space, name, value): - attr = self._spaces[space][name] - nl_type = attr['value'] + attr = self.attr_sets[space][name] + nl_type = attr.value if attr["type"] == 'nest': nl_type |= Netlink.NLA_F_NESTED attr_payload = b'' for subname, subvalue in value.items(): attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue) + elif attr["type"] == 'flag': + attr_payload = b'' elif attr["type"] == 'u32': attr_payload = struct.pack("I", int(value)) elif attr["type"] == 'string': @@ -430,36 +367,81 @@ class YnlFamily: rsp[attr_spec['name']] = value def _decode(self, attrs, space): - attr_space = self._spaces[space] + attr_space = self.attr_sets[space] rsp = dict() for attr in attrs: - attr_spec = attr_space.attr_list[attr.type] + attr_spec = attr_space.attrs_by_val[attr.type] if attr_spec["type"] == 'nest': subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes']) - rsp[attr_spec['name']] = subdict + decoded = subdict + elif attr_spec['type'] == 'u8': + decoded = attr.as_u8() elif attr_spec['type'] == 'u32': - rsp[attr_spec['name']] = attr.as_u32() + decoded = attr.as_u32() elif attr_spec['type'] == 'u64': - rsp[attr_spec['name']] = attr.as_u64() + decoded = attr.as_u64() elif attr_spec["type"] == 'string': - rsp[attr_spec['name']] = attr.as_strz() + decoded = attr.as_strz() elif attr_spec["type"] == 'binary': - rsp[attr_spec['name']] = attr.as_bin() + decoded = attr.as_bin() + elif attr_spec["type"] == 'flag': + decoded = True else: raise Exception(f'Unknown {attr.type} {attr_spec["name"]} {attr_spec["type"]}') + if not attr_spec.is_multi: + rsp[attr_spec['name']] = decoded + elif attr_spec.name in rsp: + rsp[attr_spec.name].append(decoded) + else: + rsp[attr_spec.name] = [decoded] + if 'enum' in attr_spec: self._decode_enum(rsp, attr_spec) return rsp + def _decode_extack_path(self, attrs, attr_set, offset, target): + for attr in attrs: + attr_spec = attr_set.attrs_by_val[attr.type] + if offset > target: + break + if offset == target: + return '.' + attr_spec.name + + if offset + attr.full_len <= target: + offset += attr.full_len + continue + if attr_spec['type'] != 'nest': + raise Exception(f"Can't dive into {attr.type} ({attr_spec['name']}) for extack") + offset += 4 + subpath = self._decode_extack_path(NlAttrs(attr.raw), + self.attr_sets[attr_spec['nested-attributes']], + offset, target) + if subpath is None: + return None + return '.' 
+ attr_spec.name + subpath + + return None + + def _decode_extack(self, request, attr_space, extack): + if 'bad-attr-offs' not in extack: + return + + genl_req = GenlMsg(NlMsg(request, 0, attr_space=attr_space)) + path = self._decode_extack_path(genl_req.raw_attrs, attr_space, + 20, extack['bad-attr-offs']) + if path: + del extack['bad-attr-offs'] + extack['bad-attr'] = path + def handle_ntf(self, nl_msg, genl_msg): msg = dict() if self.include_raw: msg['nlmsg'] = nl_msg msg['genlmsg'] = genl_msg - op = self._op_array[genl_msg.genl_cmd] + op = self.rsp_by_value[genl_msg.genl_cmd] msg['name'] = op['name'] - msg['msg'] = self._decode(genl_msg.raw_attrs, op['attribute-set']) + msg['msg'] = self._decode(genl_msg.raw_attrs, op.attr_set.name) self.async_msg_queue.append(msg) def check_ntf(self): @@ -487,16 +469,16 @@ class YnlFamily: self.handle_ntf(nl_msg, gm) def _op(self, method, vals, dump=False): - op = self._ops[method] + op = self.ops[method] nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK if dump: nl_flags |= Netlink.NLM_F_DUMP req_seq = random.randint(1024, 65535) - msg = _genl_msg(self.family.family_id, nl_flags, op['value'], 1, req_seq) + msg = _genl_msg(self.family.family_id, nl_flags, op.req_value, 1, req_seq) for name, value in vals.items(): - msg += self._add_attr(op['attribute-set'], name, value) + msg += self._add_attr(op.attr_set.name, name, value) msg = _genl_msg_finalize(msg) self.sock.send(msg, 0) @@ -505,19 +487,25 @@ class YnlFamily: rsp = [] while not done: reply = self.sock.recv(128 * 1024) - nms = NlMsgs(reply, attr_space=self._spaces[op['attribute-set']]) + nms = NlMsgs(reply, attr_space=op.attr_set) for nl_msg in nms: + if nl_msg.extack: + self._decode_extack(msg, op.attr_set, nl_msg.extack) + if nl_msg.error: print("Netlink error:", os.strerror(-nl_msg.error)) print(nl_msg) return if nl_msg.done: + if nl_msg.extack: + print("Netlink warning:") + print(nl_msg) done = True break gm = GenlMsg(nl_msg) # Check if this is a reply to our request - if nl_msg.nl_seq != req_seq or gm.genl_cmd != op['value']: + if nl_msg.nl_seq != req_seq or gm.genl_cmd != op.rsp_value: if gm.genl_cmd in self.async_msg_ids: self.handle_ntf(nl_msg, gm) continue @@ -525,10 +513,16 @@ class YnlFamily: print('Unexpected message: ' + repr(gm)) continue - rsp.append(self._decode(gm.raw_attrs, op['attribute-set'])) + rsp.append(self._decode(gm.raw_attrs, op.attr_set.name)) if not rsp: return None if not dump and len(rsp) == 1: return rsp[0] return rsp + + def do(self, method, vals): + return self._op(method, vals) + + def dump(self, method, vals): + return self._op(method, vals, dump=True) diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index 1aa872e582ab..3942f24b9163 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -1,11 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import argparse import collections -import jsonschema import os import yaml +from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation + def c_upper(name): return name.upper().replace('-', '_') @@ -28,12 +29,12 @@ class BaseNlLib: "ynl_cb_array, NLMSG_MIN_TYPE)" -class Type: - def __init__(self, family, attr_set, attr): - self.family = family +class Type(SpecAttr): + def __init__(self, family, attr_set, attr, value): + super().__init__(family, attr_set, attr, value) + self.attr = attr - self.value = attr['value'] - self.name = c_lower(attr['name']) + self.attr_set = attr_set self.type = attr['type'] self.checks = attr.get('checks', {}) @@ -46,17 +47,17 @@ class Type: 
else: self.nested_render_name = f"{family.name}_{c_lower(self.nested_attrs)}" - self.enum_name = f"{attr_set.name_prefix}{self.name}" - self.enum_name = c_upper(self.enum_name) self.c_name = c_lower(self.name) if self.c_name in _C_KW: self.c_name += '_' - def __getitem__(self, key): - return self.attr[key] + # Added by resolve(): + self.enum_name = None + delattr(self, "enum_name") - def __contains__(self, key): - return key in self.attr + def resolve(self): + self.enum_name = f"{self.attr_set.name_prefix}{self.name}" + self.enum_name = c_upper(self.enum_name) def is_multi_val(self): return None @@ -214,24 +215,34 @@ class TypePad(Type): class TypeScalar(Type): - def __init__(self, family, attr_set, attr): - super().__init__(family, attr_set, attr) + def __init__(self, family, attr_set, attr, value): + super().__init__(family, attr_set, attr, value) + + self.byte_order_comment = '' + if 'byte-order' in attr: + self.byte_order_comment = f" /* {attr['byte-order']} */" + + # Added by resolve(): + self.is_bitfield = None + delattr(self, "is_bitfield") + self.type_name = None + delattr(self, "type_name") + + def resolve(self): + self.resolve_up(super()) - self.is_bitfield = False - if 'enum' in self.attr: - self.is_bitfield = family.consts[self.attr['enum']]['type'] == 'flags' if 'enum-as-flags' in self.attr and self.attr['enum-as-flags']: self.is_bitfield = True + elif 'enum' in self.attr: + self.is_bitfield = self.family.consts[self.attr['enum']]['type'] == 'flags' + else: + self.is_bitfield = False if 'enum' in self.attr and not self.is_bitfield: - self.type_name = f"enum {family.name}_{c_lower(self.attr['enum'])}" + self.type_name = f"enum {self.family.name}_{c_lower(self.attr['enum'])}" else: self.type_name = '__' + self.type - self.byte_order_comment = '' - if 'byte-order' in attr: - self.byte_order_comment = f" /* {attr['byte-order']} */" - def _mnl_type(self): t = self.type # mnl does not have a helper for signed types @@ -648,14 +659,11 @@ class EnumSet: return mask -class AttrSet: +class AttrSet(SpecAttrSet): def __init__(self, family, yaml): - self.yaml = yaml + super().__init__(family, yaml) - self.attrs = dict() - self.name = self.yaml['name'] - if 'subset-of' not in yaml: - self.subset_of = None + if self.subset_of is None: if 'name-prefix' in yaml: pfx = yaml['name-prefix'] elif self.name == family.name: @@ -665,83 +673,68 @@ class AttrSet: self.name_prefix = c_upper(pfx) self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max")) else: - self.subset_of = self.yaml['subset-of'] self.name_prefix = family.attr_sets[self.subset_of].name_prefix self.max_name = family.attr_sets[self.subset_of].max_name + # Added by resolve: + self.c_name = None + delattr(self, "c_name") + + def resolve(self): self.c_name = c_lower(self.name) if self.c_name in _C_KW: self.c_name += '_' - if self.c_name == family.c_name: + if self.c_name == self.family.c_name: self.c_name = '' - val = 0 - for elem in self.yaml['attributes']: - if 'value' in elem: - val = elem['value'] - else: - elem['value'] = val - val += 1 - - if 'multi-attr' in elem and elem['multi-attr']: - attr = TypeMultiAttr(family, self, elem) - elif elem['type'] in scalars: - attr = TypeScalar(family, self, elem) - elif elem['type'] == 'unused': - attr = TypeUnused(family, self, elem) - elif elem['type'] == 'pad': - attr = TypePad(family, self, elem) - elif elem['type'] == 'flag': - attr = TypeFlag(family, self, elem) - elif elem['type'] == 'string': - attr = TypeString(family, self, elem) - elif elem['type'] == 'binary': 
- attr = TypeBinary(family, self, elem) - elif elem['type'] == 'nest': - attr = TypeNest(family, self, elem) - elif elem['type'] == 'array-nest': - attr = TypeArrayNest(family, self, elem) - elif elem['type'] == 'nest-type-value': - attr = TypeNestTypeValue(family, self, elem) - else: - raise Exception(f"No typed class for type {elem['type']}") - - self.attrs[elem['name']] = attr - - def __getitem__(self, key): - return self.attrs[key] - - def __contains__(self, key): - return key in self.yaml - - def __iter__(self): - yield from self.attrs + def new_attr(self, elem, value): + if 'multi-attr' in elem and elem['multi-attr']: + return TypeMultiAttr(self.family, self, elem, value) + elif elem['type'] in scalars: + return TypeScalar(self.family, self, elem, value) + elif elem['type'] == 'unused': + return TypeUnused(self.family, self, elem, value) + elif elem['type'] == 'pad': + return TypePad(self.family, self, elem, value) + elif elem['type'] == 'flag': + return TypeFlag(self.family, self, elem, value) + elif elem['type'] == 'string': + return TypeString(self.family, self, elem, value) + elif elem['type'] == 'binary': + return TypeBinary(self.family, self, elem, value) + elif elem['type'] == 'nest': + return TypeNest(self.family, self, elem, value) + elif elem['type'] == 'array-nest': + return TypeArrayNest(self.family, self, elem, value) + elif elem['type'] == 'nest-type-value': + return TypeNestTypeValue(self.family, self, elem, value) + else: + raise Exception(f"No typed class for type {elem['type']}") - def items(self): - return self.attrs.items() +class Operation(SpecOperation): + def __init__(self, family, yaml, req_value, rsp_value): + super().__init__(family, yaml, req_value, rsp_value) -class Operation: - def __init__(self, family, yaml, value): - self.yaml = yaml - self.value = value + if req_value != rsp_value: + raise Exception("Directional messages not supported by codegen") - self.name = self.yaml['name'] self.render_name = family.name + '_' + c_lower(self.name) - self.is_async = 'notify' in yaml or 'event' in yaml - if not self.is_async: - self.enum_name = family.op_prefix + c_upper(self.name) - else: - self.enum_name = family.async_op_prefix + c_upper(self.name) self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \ ('dump' in yaml and 'request' in yaml['dump']) - def __getitem__(self, key): - return self.yaml[key] + # Added by resolve: + self.enum_name = None + delattr(self, "enum_name") - def __contains__(self, key): - return key in self.yaml + def resolve(self): + self.resolve_up(super()) + + if not self.is_async: + self.enum_name = self.family.op_prefix + c_upper(self.name) + else: + self.enum_name = self.family.async_op_prefix + c_upper(self.name) def add_notification(self, op): if 'notify' not in self.yaml: @@ -751,21 +744,23 @@ class Operation: self.yaml['notify']['cmds'].append(op) -class Family: +class Family(SpecFamily): def __init__(self, file_name): - with open(file_name, "r") as stream: - self.yaml = yaml.safe_load(stream) - - self.proto = self.yaml.get('protocol', 'genetlink') - - with open(os.path.dirname(os.path.dirname(file_name)) + - f'/{self.proto}.yaml', "r") as stream: - schema = yaml.safe_load(stream) - - jsonschema.validate(self.yaml, schema) - - if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}: - raise Exception("Codegen only supported for genetlink") + # Added by resolve: + self.c_name = None + delattr(self, "c_name") + self.op_prefix = None + delattr(self, "op_prefix") + 
self.async_op_prefix = None + delattr(self, "async_op_prefix") + self.mcgrps = None + delattr(self, "mcgrps") + self.consts = None + delattr(self, "consts") + self.hooks = None + delattr(self, "hooks") + + super().__init__(file_name) self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME')) self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION')) @@ -773,12 +768,18 @@ class Family: if 'definitions' not in self.yaml: self.yaml['definitions'] = [] - self.name = self.yaml['name'] - self.c_name = c_lower(self.name) if 'uapi-header' in self.yaml: self.uapi_header = self.yaml['uapi-header'] else: self.uapi_header = f"linux/{self.name}.h" + + def resolve(self): + self.resolve_up(super()) + + if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}: + raise Exception("Codegen only supported for genetlink") + + self.c_name = c_lower(self.name) if 'name-prefix' in self.yaml['operations']: self.op_prefix = c_upper(self.yaml['operations']['name-prefix']) else: @@ -791,12 +792,6 @@ class Family: self.mcgrps = self.yaml.get('mcast-groups', {'list': []}) self.consts = dict() - # list of all operations - self.msg_list = [] - # dict of operations which have their own message type (have attributes) - self.ops = collections.OrderedDict() - self.attr_sets = dict() - self.attr_sets_list = [] self.hooks = dict() for when in ['pre', 'post']: @@ -824,11 +819,11 @@ class Family: if self.kernel_policy == 'global': self._load_global_policy() - def __getitem__(self, key): - return self.yaml[key] + def new_attr_set(self, elem): + return AttrSet(self, elem) - def get(self, key, default=None): - return self.yaml.get(key, default) + def new_operation(self, elem, req_value, rsp_value): + return Operation(self, elem, req_value, rsp_value) # Fake a 'do' equivalent of all events, so that we can render their response parsing def _mock_up_events(self): @@ -847,27 +842,10 @@ class Family: else: self.consts[elem['name']] = elem - for elem in self.yaml['attribute-sets']: - attr_set = AttrSet(self, elem) - self.attr_sets[elem['name']] = attr_set - self.attr_sets_list.append((elem['name'], attr_set), ) - ntf = [] - val = 0 - for elem in self.yaml['operations']['list']: - if 'value' in elem: - val = elem['value'] - - op = Operation(self, elem, val) - val += 1 - - self.msg_list.append(op) - if 'notify' in elem: - ntf.append(op) - continue - if 'attribute-set' not in elem: - continue - self.ops[elem['name']] = op + for msg in self.msgs.values(): + if 'notify' in msg: + ntf.append(msg) for n in ntf: self.ops[n['notify']].add_notification(n) @@ -933,7 +911,7 @@ class Family: if attr_set_name != op['attribute-set']: raise Exception('For a global policy all ops must use the same set') - for op_mode in {'do', 'dump'}: + for op_mode in ['do', 'dump']: if op_mode in op: global_set.update(op[op_mode].get('request', [])) @@ -2033,7 +2011,7 @@ def render_uapi(family, cw): max_by_define = family.get('max-by-define', False) - for _, attr_set in family.attr_sets_list: + for _, attr_set in family.attr_sets.items(): if attr_set.subset_of: continue @@ -2044,9 +2022,9 @@ def render_uapi(family, cw): uapi_enum_start(family, cw, attr_set.yaml, 'enum-name') for _, attr in attr_set.items(): suffix = ',' - if attr['value'] != val: - suffix = f" = {attr['value']}," - val = attr['value'] + if attr.value != val: + suffix = f" = {attr.value}," + val = attr.value val += 1 cw.p(attr.enum_name + suffix) cw.nl() @@ -2066,7 +2044,7 @@ def 
render_uapi(family, cw): max_value = f"({cnt_name} - 1)" uapi_enum_start(family, cw, family['operations'], 'enum-name') - for op in family.msg_list: + for op in family.msgs.values(): if separate_ntf and ('notify' in op or 'event' in op): continue @@ -2085,7 +2063,7 @@ def render_uapi(family, cw): if separate_ntf: uapi_enum_start(family, cw, family['operations'], enum_name='async-enum') - for op in family.msg_list: + for op in family.msgs.values(): if separate_ntf and not ('notify' in op or 'event' in op): continue @@ -2244,7 +2222,7 @@ def main(): for op_name, op in parsed.ops.items(): if parsed.kernel_policy in {'per-op', 'split'}: - for op_mode in {'do', 'dump'}: + for op_mode in ['do', 'dump']: if op_mode in op and 'request' in op[op_mode]: cw.p(f"/* {op.enum_name} - {op_mode} */") ri = RenderInfo(cw, parsed, args.mode, op, op_name, op_mode) diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh index 71066bc4b886..5492fa5550d7 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh @@ -5,18 +5,18 @@ # prioritized according to the default priority specified at the port. # rx_octets_prio_* counters are used to verify the prioritization. # -# +-----------------------+ -# | H1 | -# | + $h1 | -# | | 192.0.2.1/28 | -# +----|------------------+ +# +----------------------------------+ +# | H1 | +# | + $h1 | +# | | 192.0.2.1/28 | +# +----|-----------------------------+ # | -# +----|------------------+ -# | SW | | -# | + $swp1 | -# | 192.0.2.2/28 | -# | APP=<prio>,1,0 | -# +-----------------------+ +# +----|-----------------------------+ +# | SW | | +# | + $swp1 | +# | 192.0.2.2/28 | +# | dcb app default-prio <prio> | +# +----------------------------------+ ALL_TESTS=" ping_ipv4 @@ -29,42 +29,6 @@ NUM_NETIFS=2 : ${HIT_TIMEOUT:=1000} # ms source $lib_dir/lib.sh -declare -a APP - -defprio_install() -{ - local dev=$1; shift - local prio=$1; shift - local app="app=$prio,1,0" - - lldptool -T -i $dev -V APP $app >/dev/null - lldpad_app_wait_set $dev - APP[$prio]=$app -} - -defprio_uninstall() -{ - local dev=$1; shift - local prio=$1; shift - local app=${APP[$prio]} - - lldptool -T -i $dev -V APP -d $app >/dev/null - lldpad_app_wait_del - unset APP[$prio] -} - -defprio_flush() -{ - local dev=$1; shift - local prio - - if ((${#APP[@]})); then - lldptool -T -i $dev -V APP -d ${APP[@]} >/dev/null - fi - lldpad_app_wait_del - APP=() -} - h1_create() { simple_if_init $h1 192.0.2.1/28 @@ -83,7 +47,7 @@ switch_create() switch_destroy() { - defprio_flush $swp1 + dcb app flush dev $swp1 default-prio ip addr del dev $swp1 192.0.2.2/28 ip link set dev $swp1 down } @@ -124,7 +88,7 @@ __test_defprio() RET=0 - defprio_install $swp1 $prio_install + dcb app add dev $swp1 default-prio $prio_install local t0=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe) mausezahn -q $h1 -d 100m -c 10 -t arp reply @@ -134,7 +98,7 @@ __test_defprio() check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $((t1 - t0))." 
log_test "Default priority $prio_install/$prio_observe" - defprio_uninstall $swp1 $prio_install + dcb app del dev $swp1 default-prio $prio_install } test_defprio() @@ -145,7 +109,7 @@ test_defprio() __test_defprio $prio $prio done - defprio_install $swp1 3 + dcb app add dev $swp1 default-prio 3 __test_defprio 0 3 __test_defprio 1 3 __test_defprio 2 3 @@ -153,7 +117,7 @@ test_defprio() __test_defprio 5 5 __test_defprio 6 6 __test_defprio 7 7 - defprio_uninstall $swp1 3 + dcb app del dev $swp1 default-prio 3 } trap cleanup EXIT diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh index 28a570006d4d..87c41f5727c9 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -20,7 +20,7 @@ # | SW | | | # | +-|----------------------------------------------------------------|-+ | # | | + $swp1 BR $swp2 + | | -# | | APP=0,5,10 .. 7,5,17 APP=0,5,20 .. 7,5,27 | | +# | | dcb dscp-prio 10:0...17:7 dcb dscp-prio 20:0...27:7 | | # | +--------------------------------------------------------------------+ | # +---------------------------------------------------------------------------+ @@ -62,16 +62,6 @@ h2_destroy() simple_if_fini $h2 192.0.2.2/28 } -dscp_map() -{ - local base=$1; shift - local prio - - for prio in {0..7}; do - echo app=$prio,5,$((base + prio)) - done -} - switch_create() { ip link add name br1 type bridge vlan_filtering 1 @@ -81,17 +71,14 @@ switch_create() ip link set dev $swp2 master br1 ip link set dev $swp2 up - lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null - lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null - lldpad_app_wait_set $swp1 - lldpad_app_wait_set $swp2 + dcb app add dev $swp1 dscp-prio 10:0 11:1 12:2 13:3 14:4 15:5 16:6 17:7 + dcb app add dev $swp2 dscp-prio 20:0 21:1 22:2 23:3 24:4 25:5 26:6 27:7 } switch_destroy() { - lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null - lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null - lldpad_app_wait_del + dcb app del dev $swp2 dscp-prio 20:0 21:1 22:2 23:3 24:4 25:5 26:6 27:7 + dcb app del dev $swp1 dscp-prio 10:0 11:1 12:2 13:3 14:4 15:5 16:6 17:7 ip link set dev $swp2 down ip link set dev $swp2 nomaster diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh index 4cb2aa65278a..f6c23f84423e 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh @@ -94,16 +94,6 @@ h2_destroy() simple_if_fini $h2 192.0.2.18/28 } -dscp_map() -{ - local base=$1; shift - local prio - - for prio in {0..7}; do - echo app=$prio,5,$((base + prio)) - done -} - switch_create() { simple_if_init $swp1 192.0.2.2/28 @@ -112,17 +102,14 @@ switch_create() tc qdisc add dev $swp1 clsact tc qdisc add dev $swp2 clsact - lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null - lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null - lldpad_app_wait_set $swp1 - lldpad_app_wait_set $swp2 + dcb app add dev $swp1 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + dcb app add dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 } switch_destroy() { - lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null - lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null - lldpad_app_wait_del + dcb app del dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + dcb app del dev $swp1 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 tc qdisc del dev 
$swp2 clsact tc qdisc del dev $swp1 clsact @@ -265,13 +252,11 @@ test_dscp_leftover() { echo "Test that last removed DSCP rule is deconfigured correctly" - lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null - lldpad_app_wait_del + dcb app del dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 __test_update 0 zero - lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null - lldpad_app_wait_set $swp2 + dcb app add dev $swp2 dscp-prio 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 } trap cleanup EXIT diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 1c4f866de7d7..29cd4705c752 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -524,27 +524,6 @@ cmd_jq() [ ! -z "$output" ] } -lldpad_app_wait_set() -{ - local dev=$1; shift - - while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do - echo "$dev: waiting for lldpad to push pending APP updates" - sleep 5 - done -} - -lldpad_app_wait_del() -{ - # Give lldpad a chance to push down the changes. If the device is downed - # too soon, the updates will be left pending. However, they will have - # been struck off the lldpad's DB already, so we won't be able to tell - # they are pending. Then on next test iteration this would cause - # weirdness as newly-added APP rules conflict with the old ones, - # sometimes getting stuck in an "unknown" state. - sleep 5 -} - pre_cleanup() { if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then |
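
Beyond the generated C code and the CLI, the lib/nlspec.py classes added above are usable on their own to inspect a spec: ynl-gen-c.py subclasses SpecFamily, SpecAttrSet and SpecOperation, and YnlFamily builds on the same parse. A minimal inspection sketch under the same assumptions as the earlier example (illustrative spec path, empty schema_path to skip validation, run from tools/net/ynl/):

    # Sketch: walking a parsed spec with SpecFamily from lib/nlspec.py.
    from lib import SpecFamily

    spec = SpecFamily('Documentation/netlink/specs/netdev.yaml', schema_path='')

    for name, attr_set in spec.attr_sets.items():
        # attributes are indexed both by name (attrs) and by value (attrs_by_val)
        print(name, [(a.name, a.value) for a in attr_set.attrs.values()])

    for name, op in spec.ops.items():
        # req_value and rsp_value only differ for the 'directional' enum model
        print(name, op.req_value, op.rsp_value, op.attr_set.name)

Note that spec.ops only holds requests/responses that carry an attribute set; notifications and reserved IDs remain reachable through spec.msgs.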