Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/flower')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c   | 1180
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.h   |  231
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h        |    6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c    |  129
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c     |   40
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c |    2
6 files changed, 1581 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
new file mode 100644
index 000000000000..273d529d43c2
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -0,0 +1,1180 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#include "conntrack.h"
+
+const struct rhashtable_params nfp_tc_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 2,
+ .key_offset = offsetof(struct nfp_fl_ct_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+const struct rhashtable_params nfp_nft_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_nft_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 3,
+ .key_offset = offsetof(struct nfp_fl_nft_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id);
+
+/**
+ * get_hashentry() - Wrapper around hashtable lookup.
+ * @ht: hashtable where entry could be found
+ * @key: key to lookup
+ * @params: hashtable params
+ * @size: size of entry to allocate if not in table
+ *
+ * Returns an entry from a hashtable. If the entry does not exist yet,
+ * allocate memory for it and return the new entry. Note that a freshly
+ * allocated entry is zeroed and has not been inserted into the table;
+ * the caller is responsible for inserting it.
+ */
+static void *get_hashentry(struct rhashtable *ht, void *key,
+ const struct rhashtable_params params, size_t size)
+{
+ void *result;
+
+ result = rhashtable_lookup_fast(ht, key, params);
+
+ if (result)
+ return result;
+
+ result = kzalloc(size, GFP_KERNEL);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ return result;
+}
+
+bool is_pre_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT && !act->ct.action)
+ return true;
+ }
+ return false;
+}
+
+bool is_post_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
+ return true;
+ }
+ return false;
+}
+
+static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
+ entry2->rule->match.dissector->used_keys;
+ bool out;
+
+ /* Check the overlapping fields one by one; the parts matched by
+ * both rules (i.e. covered by both masks) must not conflict.
+ */
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_control(entry1->rule, &match1);
+ flow_rule_match_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match1, match2;
+
+ flow_rule_match_basic(entry1->rule, &match1);
+ flow_rule_match_basic(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match1, match2;
+
+ flow_rule_match_ports(entry1->rule, &match1);
+ flow_rule_match_ports(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match1, match2;
+
+ flow_rule_match_eth_addrs(entry1->rule, &match1);
+ flow_rule_match_eth_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match1, match2;
+
+ flow_rule_match_vlan(entry1->rule, &match1);
+ flow_rule_match_vlan(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match1, match2;
+
+ flow_rule_match_mpls(entry1->rule, &match1);
+ flow_rule_match_mpls(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match1, match2;
+
+ flow_rule_match_tcp(entry1->rule, &match1);
+ flow_rule_match_tcp(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_ip(entry1->rule, &match1);
+ flow_rule_match_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match1, match2;
+
+ flow_rule_match_enc_keyid(entry1->rule, &match1);
+ flow_rule_match_enc_keyid(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_enc_control(entry1->rule, &match1);
+ flow_rule_match_enc_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_enc_ip(entry1->rule, &match1);
+ flow_rule_match_enc_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match1, match2;
+
+ flow_rule_match_enc_opts(entry1->rule, &match1);
+ flow_rule_match_enc_opts(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ return 0;
+
+check_failed:
+ return -EINVAL;
+}
+
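+/* Mangle (pedit) actions operate on 32-bit words, so the offsets below are
+ * the byte offsets of those words; round_down(offsetof(...), 4) maps a
+ * header field to the word a mangle action would rewrite.
+ */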
+static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ enum flow_action_mangle_base htype = a_in->mangle.htype;
+ u32 offset = a_in->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
+ return -EOPNOTSUPP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == offsetof(struct iphdr, ttl) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (offset == offsetof(struct iphdr, saddr) &&
+ match.mask->src)
+ return -EOPNOTSUPP;
+ if (offset == offsetof(struct iphdr, daddr) &&
+ match.mask->dst)
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ /* for ipv6, tos and flow_lbl are in the same word */
+ if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offsetof(struct ipv6hdr, daddr) &&
+ memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
+ return -EOPNOTSUPP;
+ if (offset >= offsetof(struct ipv6hdr, daddr) &&
+ offset < sizeof(struct ipv6hdr) &&
+ memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* currently we can only modify ports */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
+ struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_action_entry *act;
+ int err, i;
+
+ /* Check for pre_ct->action conflicts */
+ flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
+ if (err)
+ return err;
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+
+ /* Check for nft->action conflicts */
+ flow_action_for_each(i, act, &nft_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
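+/* Check that the conntrack metadata (labels and mark) set by the nft
+ * entry's CT_METADATA action does not conflict with what the post_ct flow
+ * matches on, within the post_ct mask.
+ */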
+static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
+ struct flow_action_entry *ct_met;
+ struct flow_match_ct ct;
+ int i;
+
+ ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
+ if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
+ u32 *act_lbl;
+
+ act_lbl = ct_met->ct_metadata.labels;
+ flow_rule_match_ct(post_ct_entry->rule, &ct);
+ for (i = 0; i < 4; i++) {
+ if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
+ (act_lbl[i] & ct.mask->ct_labels[i]))
+ return -EINVAL;
+ }
+
+ if ((ct.key->ct_mark & ct.mask->ct_mark) ^
+ (ct_met->ct_metadata.mark & ct.mask->ct_mark))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+{
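+ /* Placeholder: compiling the merged flow and offloading it to the
+ * firmware is expected to be filled in by a later patch.
+ */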
+ return 0;
+}
+
+static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
+ struct net_device *netdev)
+{
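+ /* Placeholder: removal of the offloaded flow from the firmware is
+ * expected to be filled in by a later patch.
+ */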
+ return 0;
+}
+
+static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_tc_merge *tc_m_entry)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_m_entry;
+ unsigned long new_cookie[3];
+ int err;
+
+ pre_ct_entry = tc_m_entry->pre_ct_parent;
+ post_ct_entry = tc_m_entry->post_ct_parent;
+
+ err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Check that the two tc flows are also compatible with
+ * the nft entry. No need to check the pre_ct and post_ct
+ * entries as that was already done during pre_merge.
+ * The nft entry does not have a netdev or chain populated, so
+ * skip this check.
+ */
+ err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_check_meta(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Combine the tc_merge and nft cookies into the key for this entry. */
+ new_cookie[0] = tc_m_entry->cookie[0];
+ new_cookie[1] = tc_m_entry->cookie[1];
+ new_cookie[2] = nft_entry->cookie;
+ nft_m_entry = get_hashentry(&zt->nft_merge_tb,
+ &new_cookie,
+ nfp_nft_ct_merge_params,
+ sizeof(*nft_m_entry));
+
+ if (IS_ERR(nft_m_entry))
+ return PTR_ERR(nft_m_entry);
+
+ /* nft_m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ nft_m_entry->zt = zt;
+ nft_m_entry->tc_m_parent = tc_m_entry;
+ nft_m_entry->nft_parent = nft_entry;
+ nft_m_entry->tc_flower_cookie = 0;
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was
+ * created it only combined entries with the same netdev, so either
+ * parent's netdev can be used.
+ */
+ nft_m_entry->netdev = pre_ct_entry->netdev;
+
+ /* Add this entry to the tc_m_list and nft_flow lists */
+ list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
+ list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
+
+ /* Generate offload structure and send to nfp */
+ err = nfp_fl_ct_add_offload(nft_m_entry);
+ if (err)
+ goto err_nft_ct_offload;
+
+ err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
+ nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_ct_merge_insert;
+
+ zt->nft_merge_count++;
+
+ return err;
+
+err_nft_ct_merge_insert:
+ nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
+ nft_m_entry->netdev);
+err_nft_ct_offload:
+ list_del(&nft_m_entry->tc_merge_list);
+ list_del(&nft_m_entry->nft_flow_list);
+ kfree(nft_m_entry);
+ return err;
+}
+
+static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_flow_entry *ct_entry2)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
+ struct nfp_fl_ct_tc_merge *m_entry;
+ unsigned long new_cookie[2];
+ int err;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT) {
+ pre_ct_entry = ct_entry1;
+ post_ct_entry = ct_entry2;
+ } else {
+ post_ct_entry = ct_entry1;
+ pre_ct_entry = ct_entry2;
+ }
+
+ if (post_ct_entry->netdev != pre_ct_entry->netdev)
+ return -EINVAL;
+ /* Checks that the chain_index of the filter matches the
+ * chain_index of the GOTO action.
+ */
+ if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
+ return -EINVAL;
+
+ err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
+ if (err)
+ return err;
+
+ new_cookie[0] = pre_ct_entry->cookie;
+ new_cookie[1] = post_ct_entry->cookie;
+ m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
+ nfp_tc_ct_merge_params, sizeof(*m_entry));
+ if (IS_ERR(m_entry))
+ return PTR_ERR(m_entry);
+
+ /* m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ m_entry->zt = zt;
+ m_entry->post_ct_parent = post_ct_entry;
+ m_entry->pre_ct_parent = pre_ct_entry;
+
+ /* Add this entry to the pre_ct and post_ct lists */
+ list_add(&m_entry->post_ct_list, &post_ct_entry->children);
+ list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
+ INIT_LIST_HEAD(&m_entry->children);
+
+ err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ goto err_ct_tc_merge_insert;
+ zt->tc_merge_count++;
+
+ /* Merge with existing nft flows */
+ list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
+ list_node) {
+ nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
+ }
+
+ return 0;
+
+err_ct_tc_merge_insert:
+ list_del(&m_entry->post_ct_list);
+ list_del(&m_entry->pre_ct_list);
+ kfree(m_entry);
+ return err;
+}
+
+static struct
+nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
+ u16 zone, bool wildcarded)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ if (wildcarded && priv->ct_zone_wc)
+ return priv->ct_zone_wc;
+
+ if (!wildcarded) {
+ zt = get_hashentry(&priv->ct_zone_table, &zone,
+ nfp_zone_table_params, sizeof(*zt));
+
+ /* If priv is set this is an existing entry, just return it */
+ if (IS_ERR(zt) || zt->priv)
+ return zt;
+ } else {
+ zt = kzalloc(sizeof(*zt), GFP_KERNEL);
+ if (!zt)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ zt->zone = zone;
+ zt->priv = priv;
+ zt->nft = NULL;
+
+ /* Init the various hash tables and lists */
+ INIT_LIST_HEAD(&zt->pre_ct_list);
+ INIT_LIST_HEAD(&zt->post_ct_list);
+ INIT_LIST_HEAD(&zt->nft_flows_list);
+
+ err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
+ if (err)
+ goto err_tc_merge_tb_init;
+
+ err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_merge_tb_init;
+
+ if (wildcarded) {
+ priv->ct_zone_wc = zt;
+ } else {
+ err = rhashtable_insert_fast(&priv->ct_zone_table,
+ &zt->hash_node,
+ nfp_zone_table_params);
+ if (err)
+ goto err_zone_insert;
+ }
+
+ return zt;
+
+err_zone_insert:
+ rhashtable_destroy(&zt->nft_merge_tb);
+err_nft_merge_tb_init:
+ rhashtable_destroy(&zt->tc_merge_tb);
+err_tc_merge_tb_init:
+ kfree(zt);
+ return ERR_PTR(err);
+}
+
+static struct
+nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ bool is_nft, struct netlink_ext_ack *extack)
+{
+ struct nf_flow_match *nft_match = NULL;
+ struct nfp_fl_ct_flow_entry *entry;
+ struct nfp_fl_ct_map_entry *map;
+ struct flow_action_entry *act;
+ int err, i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
+ if (!entry->rule) {
+ err = -ENOMEM;
+ goto err_pre_ct_rule;
+ }
+
+ /* nft flows get destroyed after the callback returns, so we need
+ * to make a full copy instead of just keeping a reference.
+ */
+ if (is_nft) {
+ nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
+ if (!nft_match) {
+ err = -ENOMEM;
+ goto err_pre_ct_act;
+ }
+ memcpy(&nft_match->dissector, flow->rule->match.dissector,
+ sizeof(nft_match->dissector));
+ memcpy(&nft_match->mask, flow->rule->match.mask,
+ sizeof(nft_match->mask));
+ memcpy(&nft_match->key, flow->rule->match.key,
+ sizeof(nft_match->key));
+ entry->rule->match.dissector = &nft_match->dissector;
+ entry->rule->match.mask = &nft_match->mask;
+ entry->rule->match.key = &nft_match->key;
+ } else {
+ entry->rule->match.dissector = flow->rule->match.dissector;
+ entry->rule->match.mask = flow->rule->match.mask;
+ entry->rule->match.key = flow->rule->match.key;
+ }
+
+ entry->zt = zt;
+ entry->netdev = netdev;
+ entry->cookie = flow->cookie;
+ entry->chain_index = flow->common.chain_index;
+ entry->tun_offset = NFP_FL_CT_NO_TUN;
+
+ /* Copy over the action data. Unfortunately we do not get a handle
+ * to the original tcf_action data, and the flow objects get
+ * destroyed too, so we cannot simply save a pointer; the data has
+ * to be copied.
+ */
+ entry->rule->action.num_entries = flow->rule->action.num_entries;
+ flow_action_for_each(i, act, &flow->rule->action) {
+ struct flow_action_entry *new_act;
+
+ new_act = &entry->rule->action.entries[i];
+ memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* Tunnel encap is a special case: the tunnel info has to be
+ * allocated and copied as well.
+ */
+ if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
+ struct ip_tunnel_info *tun = act->tunnel;
+ size_t tun_size = sizeof(*tun) + tun->options_len;
+
+ new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
+ if (!new_act->tunnel) {
+ err = -ENOMEM;
+ goto err_pre_ct_tun_cp;
+ }
+ entry->tun_offset = i;
+ }
+ }
+
+ INIT_LIST_HEAD(&entry->children);
+
+ /* Now add a ct map entry to flower-priv */
+ map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params, sizeof(*map));
+ if (IS_ERR(map)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry creation failed");
+ err = -ENOMEM;
+ goto err_ct_flow_insert;
+ }
+ map->cookie = flow->cookie;
+ map->ct_entry = entry;
+ err = rhashtable_insert_fast(&zt->priv->ct_map_table,
+ &map->hash_node,
+ nfp_ct_map_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry table add failed");
+ goto err_map_insert;
+ }
+
+ return entry;
+
+err_map_insert:
+ kfree(map);
+err_ct_flow_insert:
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+err_pre_ct_tun_cp:
+ kfree(nft_match);
+err_pre_ct_act:
+ kfree(entry->rule);
+err_pre_ct_rule:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_entry->zt;
+
+ /* Flow is in HW, need to delete */
+ if (m_entry->tc_flower_cookie) {
+ err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
+ m_entry->netdev);
+ if (err)
+ return;
+ }
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
+ &m_entry->hash_node,
+ nfp_nft_ct_merge_params));
+ zt->nft_merge_count--;
+ list_del(&m_entry->tc_merge_list);
+ list_del(&m_entry->nft_flow_list);
+
+ kfree(m_entry);
+}
+
+static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
+{
+ struct nfp_fl_nft_tc_merge *m_entry, *tmp;
+
+ /* These merged entries are part of two lists: one hangs off an
+ * nft_entry and the other off a tc_merge structure. Iterate
+ * through the relevant list and clean up the entries.
+ */
+
+ if (is_nft_flow) {
+ /* Need to iterate through the list of nft_flow entries */
+ struct nfp_fl_ct_flow_entry *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ nft_flow_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ } else {
+ /* Need to iterate through the list of tc_merged_flow entries */
+ struct nfp_fl_ct_tc_merge *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ tc_merge_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ }
+}
+
+static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_ent->zt;
+ err = rhashtable_remove_fast(&zt->tc_merge_tb,
+ &m_ent->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ pr_warn("WARNING: could not remove merge_entry from hashtable\n");
+ zt->tc_merge_count--;
+ list_del(&m_ent->post_ct_list);
+ list_del(&m_ent->pre_ct_list);
+
+ if (!list_empty(&m_ent->children))
+ nfp_free_nft_merge_children(m_ent, false);
+ kfree(m_ent);
+}
+
+static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
+{
+ struct nfp_fl_ct_tc_merge *m_ent, *tmp;
+
+ switch (entry->type) {
+ case CT_TYPE_PRE_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
+{
+ list_del(&entry->list_node);
+
+ if (!list_empty(&entry->children)) {
+ if (entry->type == CT_TYPE_NFT)
+ nfp_free_nft_merge_children(entry, true);
+ else
+ nfp_free_tc_merge_children(entry);
+ }
+
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+
+ if (entry->type == CT_TYPE_NFT) {
+ struct nf_flow_match *nft_match;
+
+ nft_match = container_of(entry->rule->match.dissector,
+ struct nf_flow_match, dissector);
+ kfree(nft_match);
+ }
+
+ kfree(entry->rule);
+ kfree(entry);
+}
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id)
+{
+ struct flow_action_entry *act = NULL;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == act_id)
+ return act;
+ }
+ return NULL;
+}
+
+static void
+nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_zone_entry *zt_src,
+ struct nfp_fl_ct_zone_entry *zt_dst)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
+ struct list_head *ct_list;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT)
+ ct_list = &zt_src->post_ct_list;
+ else if (ct_entry1->type == CT_TYPE_POST_CT)
+ ct_list = &zt_src->pre_ct_list;
+ else
+ return;
+
+ list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
+ list_node) {
+ nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
+ }
+}
+
+static void
+nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_tc_merge *tc_merge_entry;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
+ rhashtable_walk_start(&iter);
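+ /* The walk is paused around each merge since nfp_ct_do_nft_merge()
+ * may sleep (GFP_KERNEL allocations, hashtable inserts), which is
+ * not allowed while an rhashtable walk is running.
+ */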
+ while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(tc_merge_entry))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *ct_act, *ct_goto;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
+ if (!ct_act) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack action empty in conntrack offload");
+ return -EOPNOTSUPP;
+ }
+
+ ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
+ if (!ct_goto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack requires ACTION_GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ if (!zt->nft) {
+ zt->nft = ct_act->ct.flow_table;
+ err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not register nft_callback");
+ return err;
+ }
+ }
+
+ /* Add entry to pre_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_PRE_CT;
+ ct_entry->chain_index = ct_goto->chain_index;
+ list_add(&ct_entry->list_node, &zt->pre_ct_list);
+ zt->pre_ct_count++;
+
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+
+ /* Need to check and merge with tables in the wc_zone as well */
+ if (priv->ct_zone_wc)
+ nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
+
+ return 0;
+}
+
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ bool wildcarded = false;
+ struct flow_match_ct ct;
+
+ flow_rule_match_ct(rule, &ct);
+ if (!ct.mask->ct_zone) {
+ wildcarded = true;
+ } else if (ct.mask->ct_zone != U16_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: partially wildcarded ct_zone is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ /* Add entry to post_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+
+ ct_entry->type = CT_TYPE_POST_CT;
+ ct_entry->chain_index = flow->common.chain_index;
+ list_add(&ct_entry->list_node, &zt->post_ct_list);
+ zt->post_ct_count++;
+
+ if (wildcarded) {
+ /* Iterate through all non-empty zone tables, looking for
+ * pre_ct entries to merge with.
+ */
+ struct rhashtable_iter iter;
+ struct nfp_fl_ct_zone_entry *zone_table;
+
+ rhashtable_walk_enter(&priv->ct_zone_table, &iter);
+ rhashtable_walk_start(&iter);
+ while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(zone_table))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ } else {
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+ }
+
+ return 0;
+}
+
+static int
+nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
+{
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct netlink_ext_ack *extack = NULL;
+
+ ASSERT_RTNL();
+
+ extack = flow->common.extack;
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ /* Netfilter can request offload multiple times for the same
+ * flow - protect against adding duplicates.
+ */
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (!ct_map_ent) {
+ ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_NFT;
+ list_add(&ct_entry->list_node, &zt->nft_flows_list);
+ zt->nft_flows_count++;
+ nfp_ct_merge_nft_with_tc(ct_entry, zt);
+ }
+ return 0;
+ case FLOW_CLS_DESTROY:
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ return nfp_fl_ct_del_flow(ct_map_ent);
+ case FLOW_CLS_STATS:
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *flow = type_data;
+ struct nfp_fl_ct_zone_entry *zt = cb_priv;
+ int err = -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ rtnl_lock();
+ err = nfp_fl_ct_offload_nft_flow(zt, flow);
+ rtnl_unlock();
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static void
+nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+
+ list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
+ list_node) {
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
+ &nft_entry->cookie,
+ nfp_ct_map_params);
+ nfp_fl_ct_del_flow(ct_map_ent);
+ }
+}
+
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct rhashtable *m_table;
+
+ if (!ct_map_ent)
+ return -ENOENT;
+
+ zt = ct_map_ent->ct_entry->zt;
+ ct_entry = ct_map_ent->ct_entry;
+ m_table = &zt->priv->ct_map_table;
+
+ switch (ct_entry->type) {
+ case CT_TYPE_PRE_CT:
+ zt->pre_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+
+ /* If this is the last pre_ct_rule it means that it is
+ * very likely that the nft table will be cleaned up next,
+ * as this happens on the removal of the last act_ct flow.
+ * However we cannot deregister the callback on the removal
+ * of the last nft flow as this runs into a deadlock situation.
+ * So deregister the callback on removal of the last pre_ct flow
+ * and remove any remaining nft flow entries. We also cannot
+ * save this state and delete the callback later since the
+ * nft table would already have been freed at that time.
+ */
+ if (!zt->pre_ct_count) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ nfp_fl_ct_clean_nft_entries(zt);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ zt->post_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+ break;
+ case CT_TYPE_NFT:
+ zt->nft_flows_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
+ kfree(ct_map_ent);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
new file mode 100644
index 000000000000..170b6cdb8cd0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#ifndef __NFP_FLOWER_CONNTRACK_H__
+#define __NFP_FLOWER_CONNTRACK_H__ 1
+
+#include <net/netfilter/nf_flow_table.h>
+#include "main.h"
+
+#define NFP_FL_CT_NO_TUN 0xff
+
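+/* Compare two flow matches over the bytes that both rules mask. *__out is
+ * set to true if any byte covered by both masks differs between the two
+ * keys, i.e. the unmasked parts conflict and the flows cannot be merged.
+ */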
+#define COMPARE_UNMASKED_FIELDS(__match1, __match2, __out) \
+ do { \
+ typeof(__match1) _match1 = (__match1); \
+ typeof(__match2) _match2 = (__match2); \
+ bool *_out = (__out); \
+ int i, size = sizeof(*(_match1).key); \
+ char *k1, *m1, *k2, *m2; \
+ *_out = false; \
+ k1 = (char *)_match1.key; \
+ m1 = (char *)_match1.mask; \
+ k2 = (char *)_match2.key; \
+ m2 = (char *)_match2.mask; \
+ for (i = 0; i < size; i++) \
+ if ((k1[i] & m1[i] & m2[i]) ^ \
+ (k2[i] & m1[i] & m2[i])) { \
+ *_out = true; \
+ break; \
+ } \
+ } while (0)
+
+extern const struct rhashtable_params nfp_zone_table_params;
+extern const struct rhashtable_params nfp_ct_map_params;
+extern const struct rhashtable_params nfp_tc_ct_merge_params;
+extern const struct rhashtable_params nfp_nft_ct_merge_params;
+
+/**
+ * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
+ * @zone: The zone number, used as lookup key in hashtable
+ * @hash_node: Used by the hashtable
+ * @priv: Pointer to nfp_flower_priv data
+ * @nft: Pointer to nf_flowtable for this zone
+ *
+ * @pre_ct_list: The pre_ct_list of nfp_fl_ct_flow_entry entries
+ * @pre_ct_count: Keep count of the number of pre_ct entries
+ *
+ * @post_ct_list: The post_ct_list of nfp_fl_ct_flow_entry entries
+ * @post_ct_count: Keep count of the number of post_ct entries
+ *
+ * @tc_merge_tb: The table of merged tc flows
+ * @tc_merge_count: Keep count of the number of merged tc entries
+ *
+ * @nft_flows_list: The list of nft-related nfp_fl_ct_flow_entry entries
+ * @nft_flows_count: Keep count of the number of nft_flow entries
+ *
+ * @nft_merge_tb: The table of merged tc+nft flows
+ * @nft_merge_count: Keep count of the number of merged tc+nft entries
+ */
+struct nfp_fl_ct_zone_entry {
+ u16 zone;
+ struct rhash_head hash_node;
+
+ struct nfp_flower_priv *priv;
+ struct nf_flowtable *nft;
+
+ struct list_head pre_ct_list;
+ unsigned int pre_ct_count;
+
+ struct list_head post_ct_list;
+ unsigned int post_ct_count;
+
+ struct rhashtable tc_merge_tb;
+ unsigned int tc_merge_count;
+
+ struct list_head nft_flows_list;
+ unsigned int nft_flows_count;
+
+ struct rhashtable nft_merge_tb;
+ unsigned int nft_merge_count;
+};
+
+enum ct_entry_type {
+ CT_TYPE_PRE_CT,
+ CT_TYPE_NFT,
+ CT_TYPE_POST_CT,
+};
+
+/**
+ * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @list_node: Used by the list
+ * @chain_index: Chain index of the original flow
+ * @netdev: netdev structure.
+ * @type: Type of pre-entry from enum ct_entry_type
+ * @zt: Reference to the zone table this belongs to
+ * @children: List of tc_merge flows this flow forms part of
+ * @rule: Reference to the original TC flow rule
+ * @stats: Used to cache stats for updating
+ * @tun_offset: Used to indicate tunnel action offset in action list
+ */
+struct nfp_fl_ct_flow_entry {
+ unsigned long cookie;
+ struct list_head list_node;
+ u32 chain_index;
+ enum ct_entry_type type;
+ struct net_device *netdev;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head children;
+ struct flow_rule *rule;
+ struct flow_stats stats;
+ u8 tun_offset; /* Set to NFP_FL_CT_NO_TUN if no tun */
+};
+
+/**
+ * struct nfp_fl_ct_tc_merge - Merge of two flows from tc
+ * @cookie: Flow cookie, combination of pre and post ct cookies
+ * @hash_node: Used by the hashtable
+ * @pre_ct_list: This entry is part of a pre_ct_list
+ * @post_ct_list: This entry is part of a post_ct_list
+ * @zt: Reference to the zone table this belongs to
+ * @pre_ct_parent: The pre_ct_parent
+ * @post_ct_parent: The post_ct_parent
+ * @children: List of nft merged entries
+ */
+struct nfp_fl_ct_tc_merge {
+ unsigned long cookie[2];
+ struct rhash_head hash_node;
+ struct list_head pre_ct_list;
+ struct list_head post_ct_list;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct nfp_fl_ct_flow_entry *pre_ct_parent;
+ struct nfp_fl_ct_flow_entry *post_ct_parent;
+ struct list_head children;
+};
+
+/**
+ * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
+ * @netdev: Ingress netdev
+ * @cookie: Flow cookie, combination of tc_merge and nft cookies
+ * @hash_node: Used by the hashtable
+ * @zt: Reference to the zone table this belongs to
+ * @nft_flow_list: This entry is part of a nft_flows_list
+ * @tc_merge_list: This entry is part of a tc_merge's children list
+ * @tc_m_parent: The tc_merge parent
+ * @nft_parent: The nft_entry parent
+ * @tc_flower_cookie: The cookie of the flow offloaded to the nfp
+ * @flow_pay: Reference to the offloaded flow struct
+ */
+struct nfp_fl_nft_tc_merge {
+ struct net_device *netdev;
+ unsigned long cookie[3];
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head nft_flow_list;
+ struct list_head tc_merge_list;
+ struct nfp_fl_ct_tc_merge *tc_m_parent;
+ struct nfp_fl_ct_flow_entry *nft_parent;
+ unsigned long tc_flower_cookie;
+ struct nfp_fl_payload *flow_pay;
+};
+
+/**
+ * struct nfp_fl_ct_map_entry - Map between flow cookie and specific ct_flow
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @hash_node: Used by the hashtable
+ * @ct_entry: Pointer to corresponding ct_entry
+ */
+struct nfp_fl_ct_map_entry {
+ unsigned long cookie;
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+};
+
+bool is_pre_ct_flow(struct flow_cls_offload *flow);
+bool is_post_ct_flow(struct flow_cls_offload *flow);
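+
+/* Illustrative example (not part of this patch): a rule pair that these
+ * helpers would classify could look like
+ *
+ *   tc filter add dev $DEV ingress chain 0 protocol ip flower \
+ *     ct_state -trk action ct zone 5 pipe action goto chain 1
+ *   tc filter add dev $DEV ingress chain 1 protocol ip flower \
+ *     ct_state +trk+est action mirred egress redirect dev $DEV2
+ *
+ * The first rule is a pre_ct flow (FLOW_ACTION_CT without commit, followed
+ * by a goto), the second a post_ct flow (matching the established
+ * ct_state).
+ */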
+
+/**
+ * nfp_fl_ct_handle_pre_ct() - Handles -trk conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other +trk+est entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+/**
+ * nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other -trk entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+
+/**
+ * nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
+ * @entry: Flow entry to cleanup
+ */
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry);
+
+/**
+ * nfp_fl_ct_del_flow() - Handle flow_del callbacks for conntrack
+ * @ct_map_ent: ct map entry for the flow that needs deleting
+ *
+ * Return: negative value on error, 0 if removed successfully.
+ */
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
+
+/**
+ * nfp_fl_ct_handle_nft_flow() - Handle flower flow callbacks for nft table
+ * @type: Type provided by callback
+ * @type_data: Callback data
+ * @cb_priv: Pointer to data provided when registering the callback, in this
+ * case it's the zone table.
+ */
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 31377923ea3d..0fbd682ccf72 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -193,6 +193,9 @@ struct nfp_fl_internal_ports {
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
+ * @ct_zone_table: Hash table used to store the different zones
+ * @ct_zone_wc: Special zone entry for wildcarded zone matches
+ * @ct_map_table: Hash table used to reference ct flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -227,6 +230,9 @@ struct nfp_flower_priv {
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
struct rhashtable merge_table;
+ struct rhashtable ct_zone_table;
+ struct nfp_fl_ct_zone_entry *ct_zone_wc;
+ struct rhashtable ct_map_table;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 327bb56b3ef5..621113650a9b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -9,6 +9,7 @@
#include <net/pkt_cls.h>
#include "cmsg.h"
+#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"
@@ -496,6 +497,20 @@ const struct rhashtable_params merge_table_params = {
.key_len = sizeof(u64),
};
+const struct rhashtable_params nfp_zone_table_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
+ .key_len = sizeof(u16),
+ .key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
+ .automatic_shrinking = false,
+};
+
+const struct rhashtable_params nfp_ct_map_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
+ .automatic_shrinking = true,
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -516,6 +531,14 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
+ err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
+ if (err)
+ goto err_free_merge_table;
+
+ err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
+ if (err)
+ goto err_free_ct_zone_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -523,7 +546,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_merge_table;
+ goto err_free_ct_map_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -560,6 +583,10 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_ct_map_table:
+ rhashtable_destroy(&priv->ct_map_table);
+err_free_ct_zone_table:
+ rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
@@ -569,6 +596,100 @@ err_free_flow_table:
return -ENOMEM;
}
+static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
+{
+ if (!zt)
+ return;
+
+ if (!list_empty(&zt->pre_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (!list_empty(&zt->post_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (zt->nft) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ }
+
+ if (!list_empty(&zt->nft_flows_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ rhashtable_free_and_destroy(&zt->tc_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&zt->nft_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+
+ kfree(zt);
+}
+
+static void nfp_free_zone_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_zone_entry *zt = ptr;
+
+ nfp_zone_table_entry_destroy(zt);
+}
+
+static void nfp_free_map_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_map_entry *map = ptr;
+
+ if (!map)
+ return;
+
+ kfree(map);
+}
+
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -582,6 +703,12 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->merge_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->ct_zone_table,
+ nfp_free_zone_table_entry, NULL);
+ nfp_zone_table_entry_destroy(priv->ct_zone_wc);
+
+ rhashtable_free_and_destroy(&priv->ct_map_table,
+ nfp_free_map_table_entry, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e95969c462e4..2406d33356ad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -7,6 +7,7 @@
#include "cmsg.h"
#include "main.h"
+#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
@@ -1276,6 +1277,20 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return 0;
}
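+
+/* Flows that match on conntrack state or that sit on a non-zero chain are
+ * only supported through the conntrack paths; reject any that reach the
+ * regular offload path.
+ */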
+static bool offload_pre_check(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
+ return false;
+
+ if (flow->common.chain_index)
+ return false;
+
+ return true;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -1302,6 +1317,15 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ if (is_pre_ct_flow(flow))
+ return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
+
+ if (is_post_ct_flow(flow))
+ return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
+
+ if (!offload_pre_check(flow))
+ return -EOPNOTSUPP;
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -1481,6 +1505,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
@@ -1490,6 +1515,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ /* Check ct_map_table */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent) {
+ err = nfp_fl_ct_del_flow(ct_map_ent);
+ return err;
+ }
+
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
@@ -1646,9 +1679,10 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
+ struct flow_cls_common_offload *common = type_data;
struct nfp_repr *repr = cb_priv;
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
+ if (!tc_can_offload_extack(repr->netdev, common->extack))
return -EOPNOTSUPP;
switch (type) {
@@ -1746,10 +1780,6 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct flow_cls_offload *flower = type_data;
-
- if (flower->common.chain_index)
- return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index d19c02e99114..ab70179728f6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -21,7 +21,7 @@
#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
/**
- * struct nfp_tun_pre_run_rule - rule matched before decap
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
* @flags: options for the rule offset
* @port_idx: index of destination MAC address for the rule
* @vlan_tci: VLAN info associated with MAC