author     Jakub Kicinski <kuba@kernel.org>  2025-01-20 22:59:25 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2025-01-20 22:59:25 +0300
commit     4fd001f5f32deb4c5e9dcd05612f8371b79d380e (patch)
tree       381f57ef5f1f6a84dbb0ebf8d33fd8325e8eb121 /include/net
parent     01f5f35ae4c96f127b997be2af7cf91f21a92d7f (diff)
parent     fdbaf5163331342e90a2c29b87629021f4c15f0c (diff)
download   linux-4fd001f5f32deb4c5e9dcd05612f8371b79d380e.tar.xz
Merge tag 'nf-next-25-01-19' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following batch contains Netfilter updates for net-next:

1) Unbreak set size settings for the rbtree set backend: intervals in rbtree
   are represented as two elements, and this detail is leaked to userspace,
   leading to bogus ENOSPC from the control plane.

2) Remove dead code in br_netfilter's br_nf_pre_routing_finish() due to a
   never matching error when looking up the route, from Antoine Tenart.

3) Simplify the check for a device already in use in flowtable, from Phil Sutter.

4) Three patches to restore the interface name field in struct nft_hook and
   use it, to prepare for wildcard interface support. From Phil Sutter.

5) Do not remove a netdev basechain when the last device is gone, for
   consistency with the flowtable behaviour. This allows for netdev
   basechains without devices. Another patch simplifies the netdev event
   notifier after this update. Also from Phil.

6) Two patches to add a missing spinlock when the flowtable updates TCP
   state flags, from Florian Westphal.

7) Simplify __nf_ct_refresh_acct() by removing the skbuff parameter, also
   from Florian.

8) Flowtable gc now extends the ct timeout for offloaded flows. This
   addresses a possible race that leads to handing over the flow to the
   classic path with long ct timeouts.

9) Tear down the flow if the cached rt_mtu is stale; before this patch, the
   packet was handed over to the classic path but the flow entry still
   remained in place.

10) Revisit the flowtable teardown strategy, which was originally designed
    to release flowtable hardware entries early. Add a new CLOSING flag that
    still allows hardware to release entries when fin/rst is seen, but keeps
    the flow entry in place when the TCP connection is closed. Release the
    flow after timeout, or when a new syn packet is seen for the TCP reopen
    scenario.

* tag 'nf-next-25-01-19' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
  netfilter: flowtable: add CLOSING state
  netfilter: flowtable: teardown flow if cached mtu is stale
  netfilter: conntrack: rework offload nf_conn timeout extension logic
  netfilter: conntrack: remove skb argument from nf_ct_refresh
  netfilter: nft_flow_offload: update tcp state flags under lock
  netfilter: nft_flow_offload: clear tcp MAXACK flag before moving to slowpath
  netfilter: nf_tables: Simplify chain netdev notifier
  netfilter: nf_tables: Tolerate chains with no remaining hooks
  netfilter: nf_tables: Compare netdev hooks based on stored name
  netfilter: nf_tables: Use stored ifname in netdev hook dumps
  netfilter: nf_tables: Store user-defined hook ifname
  netfilter: nf_tables: Flowtable hook's pf value never varies
  netfilter: br_netfilter: remove unused conditional and dead code
  netfilter: nf_tables: fix set size with rbtree backend
====================

Link: https://patch.msgid.link/20250119172051.8261-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
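As an illustration of point 1) above and of the new ksize/usize set ops added to nf_tables.h further down in this diff, an interval backend that stores each interval as two internal elements could translate sizes roughly as follows. This is a sketch only; example_interval_ksize() and example_interval_usize() are hypothetical names, not the actual nft_set_rbtree callbacks.

/* Illustrative sketch: a backend that represents one userspace interval as
 * two internal elements (start, end) translates between the size visible to
 * userspace and the size used internally, so ENOSPC is reported correctly.
 */
static u32 example_interval_ksize(u32 size)
{
	return size * 2;	/* one interval -> two kernel elements */
}

static u32 example_interval_usize(u32 size)
{
	return size / 2;	/* hide the internal doubling from userspace */
}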
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/netfilter/nf_conntrack.h  | 18
-rw-r--r--  include/net/netfilter/nf_flow_table.h |  1
-rw-r--r--  include/net/netfilter/nf_tables.h     | 10
3 files changed, 12 insertions, 17 deletions
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cba3ccf03fcc..3f02a45773e8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -204,8 +204,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- u32 extra_jiffies, bool do_acct);
+ u32 extra_jiffies, unsigned int bytes);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -213,15 +212,14 @@ static inline void nf_ct_refresh_acct(struct nf_conn *ct,
const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
+ __nf_ct_refresh_acct(ct, ctinfo, extra_jiffies, skb->len);
}
/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
- const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
+ __nf_ct_refresh_acct(ct, 0, extra_jiffies, 0);
}
/* kill conntrack and do accounting */
@@ -314,16 +312,6 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
#define NF_CT_DAY (86400 * HZ)
-/* Set an arbitrary timeout large enough not to ever expire, this save
- * us a check for the IPS_OFFLOAD_BIT from the packet path via
- * nf_ct_is_expired().
- */
-static inline void nf_ct_offload_timeout(struct nf_conn *ct)
-{
- if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
-}
-
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
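As the hunks above show, __nf_ct_refresh_acct() now takes the byte count to account directly and nf_ct_refresh() loses its skb argument. A minimal, hypothetical caller under the new signatures; example_update_timeout() is made up for illustration only.

#include <net/netfilter/nf_conntrack.h>

/* Hypothetical caller: refresh the conntrack timeout, with or without
 * accounting, using the helpers as reworked by this series.
 */
static void example_update_timeout(struct nf_conn *ct,
				   enum ip_conntrack_info ctinfo,
				   const struct sk_buff *skb,
				   u32 extra_jiffies)
{
	if (skb)
		nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies); /* accounts skb->len bytes */
	else
		nf_ct_refresh(ct, extra_jiffies); /* no skb parameter anymore */
}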
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index b63d53bb9dd6..d711642e78b5 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -163,6 +163,7 @@ struct flow_offload_tuple_rhash {
enum nf_flow_flags {
NF_FLOW_SNAT,
NF_FLOW_DNAT,
+ NF_FLOW_CLOSING,
NF_FLOW_TEARDOWN,
NF_FLOW_HW,
NF_FLOW_HW_DYING,
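The new NF_FLOW_CLOSING bit supports the teardown rework described in points 8)-10) of the merge message: on fin/rst the flow can leave hardware early while the software entry stays in place until timeout or a fresh syn. A rough, hypothetical sketch of that decision, not the actual nf_flow_table_core.c logic:

#include <linux/tcp.h>
#include <net/netfilter/nf_flow_table.h>

/* Hypothetical sketch of the CLOSING semantics: fin/rst marks the flow as
 * closing (hardware may release it early), a new syn on a closing flow
 * tears it down so the reopened connection goes back to the classic path.
 */
static void example_handle_tcp_flags(struct flow_offload *flow,
				     const struct tcphdr *tcph)
{
	if (tcph->syn && test_bit(NF_FLOW_CLOSING, &flow->flags))
		flow_offload_teardown(flow);		/* TCP reopen */
	else if (tcph->fin || tcph->rst)
		set_bit(NF_FLOW_CLOSING, &flow->flags);	/* keep entry until it times out */
}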
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0027beca5cd5..60d5dcdb289c 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -442,6 +442,9 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @ksize: kernel set size
+ * @usize: userspace set size
+ * @adjust_maxsize: delta to adjust maximum set size
* @commit: commit set elements
* @abort: abort set elements
* @privsize: function to return size of set private data
@@ -495,6 +498,9 @@ struct nft_set_ops {
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
+ u32 (*ksize)(u32 size);
+ u32 (*usize)(u32 size);
+ u32 (*adjust_maxsize)(const struct nft_set *set);
void (*commit)(struct nft_set *set);
void (*abort)(const struct nft_set *set);
u64 (*privsize)(const struct nlattr * const nla[],
@@ -1195,6 +1201,8 @@ struct nft_hook {
struct list_head list;
struct nf_hook_ops ops;
struct rcu_head rcu;
+ char ifname[IFNAMSIZ];
+ u8 ifnamelen;
};
/**
@@ -1230,8 +1238,6 @@ static inline bool nft_is_base_chain(const struct nft_chain *chain)
return chain->flags & NFT_CHAIN_BASE;
}
-int __nft_release_basechain(struct nft_ctx *ctx);
-
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
static inline bool nft_use_inc(u32 *use)
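Finally, struct nft_hook now carries the user-supplied interface name (the "Store user-defined hook ifname" and "Compare netdev hooks based on stored name" patches above), so hooks can be matched by name even when no matching net_device currently exists. A hypothetical comparison helper, for illustration only and not the in-tree implementation:

#include <linux/string.h>
#include <linux/types.h>
#include <net/netfilter/nf_tables.h>

/* Hypothetical matcher: compare a hook against a user-supplied name using
 * the ifname/ifnamelen fields added to struct nft_hook in this merge.
 */
static bool example_hook_matches(const struct nft_hook *hook,
				 const char *ifname, u8 ifnamelen)
{
	return hook->ifnamelen == ifnamelen &&
	       !memcmp(hook->ifname, ifname, ifnamelen);
}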