summary refs log tree commit diff
path: root/net
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2026-03-02 21:14:28 +0300
committerJakub Kicinski <kuba@kernel.org>2026-03-05 03:54:09 +0300
commit9cde131cdd888873363b5d9dfd8d4d4c1fae6986 (patch)
tree1ceb14f3759bd6d8d62ca4fae81cb5374c57a7bf /net
parent61753849b8bc6420cc5834fb3de331ce1134060d (diff)
downloadlinux-9cde131cdd888873363b5d9dfd8d4d4c1fae6986.tar.xz
net-sysfs: add rps_sock_flow_table_mask() helper
In preparation for the following patch, abstract access to the @mask field in 'struct rps_sock_flow_table'. Also clean up rps_sock_flow_sysctl() a bit: rename orig_sock_table to o_sock_table. Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> Link: https://patch.msgid.link/20260302181432.1836150-4-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/sysctl_net_core.c19
2 files changed, 13 insertions, 10 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 19b84eaa2643..92f8eeac8de3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5112,12 +5112,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
+ u32 flow_id;
u32 ident;
/* First check into global flow table if there is a match.
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
- ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+ flow_id = hash & rps_sock_flow_table_mask(sock_flow_table);
+ ident = READ_ONCE(sock_flow_table->ents[flow_id]);
if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0b659c932cff..cfbe798493b5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -145,16 +145,17 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
.maxlen = sizeof(size),
.mode = table->mode
};
- struct rps_sock_flow_table *orig_sock_table, *sock_table;
+ struct rps_sock_flow_table *o_sock_table, *sock_table;
static DEFINE_MUTEX(sock_flow_mutex);
void *tofree = NULL;
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rcu_dereference_protected(
+ o_sock_table = rcu_dereference_protected(
net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
- size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
+ size = o_sock_table ? rps_sock_flow_table_mask(o_sock_table) + 1 : 0;
+ orig_size = size;
ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
@@ -165,6 +166,7 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -EINVAL;
}
+ sock_table = o_sock_table;
size = roundup_pow_of_two(size);
if (size != orig_size) {
sock_table =
@@ -175,26 +177,25 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
}
net_hotdata.rps_cpu_mask =
roundup_pow_of_two(nr_cpu_ids) - 1;
- sock_table->mask = size - 1;
- } else
- sock_table = orig_sock_table;
+ sock_table->_mask = size - 1;
+ }
for (i = 0; i < size; i++)
sock_table->ents[i] = RPS_NO_CPU;
} else
sock_table = NULL;
- if (sock_table != orig_sock_table) {
+ if (sock_table != o_sock_table) {
rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
}
- if (orig_sock_table) {
+ if (o_sock_table) {
static_branch_dec(&rps_needed);
static_branch_dec(&rfs_needed);
- tofree = orig_sock_table;
+ tofree = o_sock_table;
}
}
}