Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/flower/offload.c')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c  339
1 file changed, 240 insertions(+), 99 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 1fbfeb43c538..7e725fa60347 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,7 @@
 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
-         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
-         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
 
 #define NFP_FLOWER_MERGE_FIELDS \
         (NFP_FLOWER_LAYER_PORT | \
@@ -122,9 +121,9 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
         return 0;
 }
 
-static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
 {
-        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 
         return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
@@ -132,14 +131,25 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 }
 
+static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+{
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
+}
+
 static int
-nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
-                          u32 *key_layer_two, int *key_size)
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+                          u32 *key_layer_two, int *key_size,
+                          struct netlink_ext_ack *extack)
 {
-        if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
                 return -EOPNOTSUPP;
+        }
 
-        if (enc_opts->key->len > 0) {
+        if (enc_opts->len > 0) {
                 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                 *key_size += sizeof(struct nfp_flower_geneve_options);
         }
@@ -148,13 +158,65 @@ nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
 }
 
 static int
+nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
+                              struct flow_dissector_key_enc_opts *enc_op,
+                              u32 *key_layer_two, u8 *key_layer, int *key_size,
+                              struct nfp_flower_priv *priv,
+                              enum nfp_flower_tun_type *tun_type,
+                              struct netlink_ext_ack *extack)
+{
+        int err;
+
+        switch (enc_ports->dst) {
+        case htons(IANA_VXLAN_UDP_PORT):
+                *tun_type = NFP_FL_TUNNEL_VXLAN;
+                *key_layer |= NFP_FLOWER_LAYER_VXLAN;
+                *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+                if (enc_op) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+                        return -EOPNOTSUPP;
+                }
+                break;
+        case htons(GENEVE_UDP_PORT):
+                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+                        return -EOPNOTSUPP;
+                }
+                *tun_type = NFP_FL_TUNNEL_GENEVE;
+                *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+                *key_size += sizeof(struct nfp_flower_ext_meta);
+                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+                *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+                if (!enc_op)
+                        break;
+                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
+                        return -EOPNOTSUPP;
+                }
+                err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
+                                                key_size, extack);
+                if (err)
+                        return err;
+                break;
+        default:
+                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
+                return -EOPNOTSUPP;
+        }
+
+        return 0;
+}
+
+static int
 nfp_flower_calculate_key_layers(struct nfp_app *app,
                                 struct net_device *netdev,
                                 struct nfp_fl_key_ls *ret_key_ls,
-                                struct tc_cls_flower_offload *flow,
-                                enum nfp_flower_tun_type *tun_type)
+                                struct flow_cls_offload *flow,
+                                enum nfp_flower_tun_type *tun_type,
+                                struct netlink_ext_ack *extack)
 {
-        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
         struct flow_dissector *dissector = rule->match.dissector;
         struct flow_match_basic basic = { NULL, NULL};
         struct nfp_flower_priv *priv = app->priv;
@@ -163,14 +225,18 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
         int key_size;
         int err;
 
-        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
+                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
                 return -EOPNOTSUPP;
+        }
 
         /* If any tun dissector is used then the required set must be used. */
         if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
             (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
-            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
+                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
                 return -EOPNOTSUPP;
+        }
 
         key_layer_two = 0;
         key_layer = NFP_FLOWER_LAYER_PORT;
@@ -188,8 +254,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 
                 flow_rule_match_vlan(rule, &vlan);
                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
-                    vlan.key->vlan_priority)
+                    vlan.key->vlan_priority) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
                         return -EOPNOTSUPP;
+                }
         }
 
         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -200,56 +268,68 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 
                 flow_rule_match_enc_control(rule, &enc_ctl);
 
-                if (enc_ctl.mask->addr_type != 0xffff ||
-                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+                if (enc_ctl.mask->addr_type != 0xffff) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
+                        return -EOPNOTSUPP;
+                }
+                if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
                         return -EOPNOTSUPP;
+                }
 
                 /* These fields are already verified as used. */
                 flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
-                if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
-                        return -EOPNOTSUPP;
-
-                flow_rule_match_enc_ports(rule, &enc_ports);
-                if (enc_ports.mask->dst != cpu_to_be16(~0))
+                if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                         return -EOPNOTSUPP;
+                }
 
                 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                         flow_rule_match_enc_opts(rule, &enc_op);
 
-                switch (enc_ports.key->dst) {
-                case htons(IANA_VXLAN_UDP_PORT):
-                        *tun_type = NFP_FL_TUNNEL_VXLAN;
-                        key_layer |= NFP_FLOWER_LAYER_VXLAN;
-                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-
-                        if (enc_op.key)
+                if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+                        /* check if GRE, which has no enc_ports */
+                        if (netif_is_gretap(netdev)) {
+                                *tun_type = NFP_FL_TUNNEL_GRE;
+                                key_layer |= NFP_FLOWER_LAYER_EXT_META;
+                                key_size += sizeof(struct nfp_flower_ext_meta);
+                                key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+                                key_size +=
+                                        sizeof(struct nfp_flower_ipv4_gre_tun);
+
+                                if (enc_op.key) {
+                                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+                                        return -EOPNOTSUPP;
+                                }
+                        } else {
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                                 return -EOPNOTSUPP;
-                        break;
-                case htons(GENEVE_UDP_PORT):
-                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+                        }
+                } else {
+                        flow_rule_match_enc_ports(rule, &enc_ports);
+                        if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                                 return -EOPNOTSUPP;
-                        *tun_type = NFP_FL_TUNNEL_GENEVE;
-                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
-                        key_size += sizeof(struct nfp_flower_ext_meta);
-                        key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
-                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+                        }
 
-                        if (!enc_op.key)
-                                break;
-                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
-                                return -EOPNOTSUPP;
-                        err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
-                                                        &key_size);
+                        err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                                             enc_op.key,
+                                                            &key_layer_two,
+                                                            &key_layer,
+                                                            &key_size, priv,
+                                                            tun_type, extack);
                         if (err)
                                 return err;
-                        break;
-                default:
-                        return -EOPNOTSUPP;
-                }
 
-                /* Ensure the ingress netdev matches the expected tun type. */
-                if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
-                        return -EOPNOTSUPP;
+                        /* Ensure the ingress netdev matches the expected
+                         * tun type.
+                         */
+                        if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+                                return -EOPNOTSUPP;
+                        }
+                }
         }
 
         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
@@ -272,6 +352,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                  * because we rely on it to get to the host.
                  */
                 case cpu_to_be16(ETH_P_ARP):
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
                         return -EOPNOTSUPP;
 
                 case cpu_to_be16(ETH_P_MPLS_UC):
@@ -290,14 +371,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                         /* Other ethtype - we need check the masks for the
                          * remainder of the key to ensure we can offload.
                          */
-                        if (nfp_flower_check_higher_than_mac(flow))
+                        if (nfp_flower_check_higher_than_mac(flow)) {
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
                                 return -EOPNOTSUPP;
+                        }
                         break;
                 }
         }
 
         if (basic.mask && basic.mask->ip_proto) {
-                /* Ethernet type is present in the key. */
                 switch (basic.key->ip_proto) {
                 case IPPROTO_TCP:
                 case IPPROTO_UDP:
@@ -311,7 +393,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                         /* Other ip proto - we need check the masks for the
                          * remainder of the key to ensure we can offload.
                          */
-                        return -EOPNOTSUPP;
+                        if (nfp_flower_check_higher_than_l3(flow)) {
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
+                                return -EOPNOTSUPP;
+                        }
+                        break;
                 }
         }
 
@@ -322,22 +408,28 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                 flow_rule_match_tcp(rule, &tcp);
                 tcp_flags = be16_to_cpu(tcp.key->flags);
 
-                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
                         return -EOPNOTSUPP;
+                }
 
                 /* We only support PSH and URG flags when either
                  * FIN, SYN or RST is present as well.
                  */
                 if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
-                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
                         return -EOPNOTSUPP;
+                }
 
                 /* We need to store TCP flags in the either the IPv4 or IPv6 key
                  * space, thus we need to ensure we include a IPv4/IPv6 key
                  * layer if we have not done so already.
                  */
-                if (!basic.key)
+                if (!basic.key) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
                         return -EOPNOTSUPP;
+                }
 
                 if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                     !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
@@ -353,6 +445,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                                 break;
 
                         default:
+                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                                 return -EOPNOTSUPP;
                         }
                 }
@@ -362,8 +455,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                 struct flow_match_control ctl;
 
                 flow_rule_match_control(rule, &ctl);
-                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
+                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
                         return -EOPNOTSUPP;
+                }
         }
 
         ret_key_ls->key_layer = key_layer;
@@ -771,14 +866,16 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                      struct nfp_fl_payload *sub_flow1,
                                      struct nfp_fl_payload *sub_flow2)
 {
-        struct tc_cls_flower_offload merge_tc_off;
+        struct flow_cls_offload merge_tc_off;
         struct nfp_flower_priv *priv = app->priv;
+        struct netlink_ext_ack *extack = NULL;
         struct nfp_fl_payload *merge_flow;
         struct nfp_fl_key_ls merge_key_ls;
         int err;
 
         ASSERT_RTNL();
 
+        extack = merge_tc_off.common.extack;
         if (sub_flow1 == sub_flow2 ||
             nfp_flower_is_merge_flow(sub_flow1) ||
             nfp_flower_is_merge_flow(sub_flow2))
@@ -816,7 +913,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 
         merge_tc_off.cookie = merge_flow->tc_flower_cookie;
         err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
-                                        merge_flow->ingress_dev);
+                                        merge_flow->ingress_dev, extack);
         if (err)
                 goto err_unlink_sub_flow2;
 
@@ -865,15 +962,17 @@ err_destroy_merge_flow:
  */
 static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
-                       struct tc_cls_flower_offload *flow)
+                       struct flow_cls_offload *flow)
 {
         enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
         struct nfp_flower_priv *priv = app->priv;
+        struct netlink_ext_ack *extack = NULL;
         struct nfp_fl_payload *flow_pay;
         struct nfp_fl_key_ls *key_layer;
         struct nfp_port *port = NULL;
         int err;
 
+        extack = flow->common.extack;
         if (nfp_netdev_is_nfp_repr(netdev))
                 port = nfp_port_from_netdev(netdev);
 
@@ -882,7 +981,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                 return -ENOMEM;
 
         err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
-                                              &tun_type);
+                                              &tun_type, extack);
         if (err)
                 goto err_free_key_ls;
 
@@ -893,23 +992,25 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
         }
 
         err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
-                                            flow_pay, tun_type);
+                                            flow_pay, tun_type, extack);
         if (err)
                 goto err_destroy_flow;
 
-        err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
+        err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
         if (err)
                 goto err_destroy_flow;
 
-        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
+        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
         if (err)
                 goto err_destroy_flow;
 
         flow_pay->tc_flower_cookie = flow->cookie;
         err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                      nfp_flower_table_params);
-        if (err)
+        if (err) {
+                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
                 goto err_release_metadata;
+        }
 
         err = nfp_flower_xmit_flow(app, flow_pay,
                                    NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
@@ -1024,19 +1125,23 @@ nfp_flower_del_linked_merge_flows(struct nfp_app *app,
  */
 static int
 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
-                       struct tc_cls_flower_offload *flow)
+                       struct flow_cls_offload *flow)
 {
         struct nfp_flower_priv *priv = app->priv;
+        struct netlink_ext_ack *extack = NULL;
         struct nfp_fl_payload *nfp_flow;
         struct nfp_port *port = NULL;
         int err;
 
+        extack = flow->common.extack;
         if (nfp_netdev_is_nfp_repr(netdev))
                 port = nfp_port_from_netdev(netdev);
 
         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
-        if (!nfp_flow)
+        if (!nfp_flow) {
+                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
                 return -ENOENT;
+        }
 
         err = nfp_modify_flow_metadata(app, nfp_flow);
         if (err)
@@ -1127,15 +1232,19 @@ nfp_flower_update_merge_stats(struct nfp_app *app,
  */
 static int
 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
-                     struct tc_cls_flower_offload *flow)
+                     struct flow_cls_offload *flow)
 {
         struct nfp_flower_priv *priv = app->priv;
+        struct netlink_ext_ack *extack = NULL;
         struct nfp_fl_payload *nfp_flow;
         u32 ctx_id;
 
+        extack = flow->common.extack;
         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
-        if (!nfp_flow)
+        if (!nfp_flow) {
+                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
                 return -EINVAL;
+        }
 
         ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
@@ -1156,17 +1265,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 
 static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
-                        struct tc_cls_flower_offload *flower)
+                        struct flow_cls_offload *flower)
 {
         if (!eth_proto_is_802_3(flower->common.protocol))
                 return -EOPNOTSUPP;
 
         switch (flower->command) {
-        case TC_CLSFLOWER_REPLACE:
+        case FLOW_CLS_REPLACE:
                 return nfp_flower_add_offload(app, netdev, flower);
-        case TC_CLSFLOWER_DESTROY:
+        case FLOW_CLS_DESTROY:
                 return nfp_flower_del_offload(app, netdev, flower);
-        case TC_CLSFLOWER_STATS:
+        case FLOW_CLS_STATS:
                 return nfp_flower_get_stats(app, netdev, flower);
         default:
                 return -EOPNOTSUPP;
@@ -1193,27 +1302,45 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
         }
 }
 
+static LIST_HEAD(nfp_block_cb_list);
+
 static int nfp_flower_setup_tc_block(struct net_device *netdev,
-                                     struct tc_block_offload *f)
+                                     struct flow_block_offload *f)
 {
         struct nfp_repr *repr = netdev_priv(netdev);
         struct nfp_flower_repr_priv *repr_priv;
+        struct flow_block_cb *block_cb;
 
-        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                 return -EOPNOTSUPP;
 
         repr_priv = repr->app_priv;
-        repr_priv->block_shared = tcf_block_shared(f->block);
+        repr_priv->block_shared = f->block_shared;
+        f->driver_block_list = &nfp_block_cb_list;
 
         switch (f->command) {
-        case TC_BLOCK_BIND:
-                return tcf_block_cb_register(f->block,
-                                             nfp_flower_setup_tc_block_cb,
-                                             repr, repr, f->extack);
-        case TC_BLOCK_UNBIND:
-                tcf_block_cb_unregister(f->block,
-                                        nfp_flower_setup_tc_block_cb,
-                                        repr);
+        case FLOW_BLOCK_BIND:
+                if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
+                                          &nfp_block_cb_list))
+                        return -EBUSY;
+
+                block_cb = flow_block_cb_alloc(f->net,
+                                               nfp_flower_setup_tc_block_cb,
+                                               repr, repr, NULL);
+                if (IS_ERR(block_cb))
+                        return PTR_ERR(block_cb);
+
+                flow_block_cb_add(block_cb, f);
+                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+                return 0;
+        case FLOW_BLOCK_UNBIND:
+                block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+                                                repr);
+                if (!block_cb)
+                        return -ENOENT;
+
+                flow_block_cb_remove(block_cb, f);
+                list_del(&block_cb->driver_list);
                 return 0;
         default:
                 return -EOPNOTSUPP;
@@ -1258,7 +1385,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
                                           void *type_data, void *cb_priv)
 {
         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
-        struct tc_cls_flower_offload *flower = type_data;
+        struct flow_cls_offload *flower = type_data;
 
         if (flower->common.chain_index)
                 return -EOPNOTSUPP;
@@ -1272,21 +1399,29 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
         }
 }
 
+static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+{
+        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+
+        list_del(&priv->list);
+        kfree(priv);
+}
+
 static int
 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
-                               struct tc_block_offload *f)
+                               struct flow_block_offload *f)
 {
         struct nfp_flower_indr_block_cb_priv *cb_priv;
         struct nfp_flower_priv *priv = app->priv;
-        int err;
+        struct flow_block_cb *block_cb;
 
-        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-            !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+            !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
               nfp_flower_internal_port_can_offload(app, netdev)))
                 return -EOPNOTSUPP;
 
         switch (f->command) {
-        case TC_BLOCK_BIND:
+        case FLOW_BLOCK_BIND:
                 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
                 if (!cb_priv)
                         return -ENOMEM;
@@ -1295,26 +1430,32 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                 cb_priv->app = app;
                 list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-                err = tcf_block_cb_register(f->block,
-                                            nfp_flower_setup_indr_block_cb,
-                                            cb_priv, cb_priv, f->extack);
-                if (err) {
+                block_cb = flow_block_cb_alloc(f->net,
+                                               nfp_flower_setup_indr_block_cb,
+                                               cb_priv, cb_priv,
+                                               nfp_flower_setup_indr_tc_release);
+                if (IS_ERR(block_cb)) {
                         list_del(&cb_priv->list);
                         kfree(cb_priv);
+                        return PTR_ERR(block_cb);
                 }
 
-                return err;
-        case TC_BLOCK_UNBIND:
+                flow_block_cb_add(block_cb, f);
+                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+                return 0;
+        case FLOW_BLOCK_UNBIND:
                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                 if (!cb_priv)
                         return -ENOENT;
 
-                tcf_block_cb_unregister(f->block,
-                                        nfp_flower_setup_indr_block_cb,
-                                        cb_priv);
-                list_del(&cb_priv->list);
-                kfree(cb_priv);
+                block_cb = flow_block_cb_lookup(f,
+                                                nfp_flower_setup_indr_block_cb,
+                                                cb_priv);
+                if (!block_cb)
+                        return -ENOENT;
+
+                flow_block_cb_remove(block_cb, f);
+                list_del(&block_cb->driver_list);
                 return 0;
         default:
                 return -EOPNOTSUPP;
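For context, the FLOW_BLOCK_BIND/FLOW_BLOCK_UNBIND handling this diff introduces follows the generic flow_block_cb pattern that replaced tcf_block_cb_register()/tcf_block_cb_unregister(). The following is a minimal sketch of that pattern in a hypothetical driver (the "foo_*" names are not from this patch), assuming the flow_offload API exactly as used in the diff above, where flow_block_cb_alloc() still takes f->net and flow_block_cb_lookup() takes the flow_block_offload; it is illustrative only, not part of the offload.c change.

#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

/* Hypothetical driver private data; stands in for struct nfp_repr above. */
struct foo_priv {
        struct net_device *netdev;
};

/* One driver-wide list so flow_block_cb_is_busy() can detect a callback
 * that is already bound to another block.
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct flow_cls_offload *flower = type_data;

        if (type != TC_SETUP_CLSFLOWER)
                return -EOPNOTSUPP;

        switch (flower->command) {
        case FLOW_CLS_REPLACE:
        case FLOW_CLS_DESTROY:
        case FLOW_CLS_STATS:
                /* a real driver dispatches to its add/del/stats handlers here */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int foo_setup_tc_block(struct net_device *netdev,
                              struct flow_block_offload *f)
{
        struct foo_priv *priv = netdev_priv(netdev);
        struct flow_block_cb *block_cb;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        f->driver_block_list = &foo_block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(foo_setup_tc_block_cb, priv,
                                          &foo_block_cb_list))
                        return -EBUSY;

                /* API as used in this diff: the netns is still passed via f->net */
                block_cb = flow_block_cb_alloc(f->net, foo_setup_tc_block_cb,
                                               priv, priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f, foo_setup_tc_block_cb, priv);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

The driver-wide list (foo_block_cb_list here, nfp_block_cb_list in the patch) is what lets flow_block_cb_is_busy() reject binding the same callback/ident pair to a second block, which the old tcf_block_cb_register() path handled internally.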