author		Paolo Abeni <pabeni@redhat.com>	2025-03-20 12:14:52 +0300
committer	Paolo Abeni <pabeni@redhat.com>	2025-03-20 12:14:53 +0300
commit		6855b9be9cf70d3fd4b4b9a00696eae65335320c (patch)
tree		6c11df36464c5b02c529c0e1868066674e215448 /net/mptcp/pm.c
parent		27b918007d96402aba10ed52a6af8015230f1793 (diff)
parent		9cf0128e64ab4ecd46f734d5c9870549cd29053f (diff)
download	linux-6855b9be9cf70d3fd4b4b9a00696eae65335320c.tar.xz
Merge branch 'mptcp-pm-prep-work-for-new-ops-and-sysctl-knobs'
Matthieu Baerts says:

====================
mptcp: pm: prep work for new ops and sysctl knobs

Here are a few cleanups, preparation work for the new PM ops, and sysctl
knobs.

- Patch 1: reorg: move generic NL code used by all PMs to pm_netlink.c.
- Patch 2: use kmemdup() instead of kmalloc() + copy.
- Patch 3: small cleanup to use the pm variable instead of msk->pm.
- Patch 4: reorg: id_avail_bitmap is only used by the in-kernel PM.
- Patch 5: use struct_group to easily reset a subset of PM data vars.
- Patch 6: introduce the minimal skeleton for the new PM ops.
- Patch 7: register the in-kernel and userspace PM ops.
- Patch 8: new net.mptcp.path_manager sysctl knob, deprecating pm_type.
- Patch 9: map the new path_manager sysctl knob to the old pm_type one.
- Patch 10: map the old pm_type sysctl knob to the new path_manager one.
- Patch 11: new net.mptcp.available_path_managers sysctl knob.
- Patch 12: new test to validate the path_manager and pm_type mapping.

Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
====================

Link: https://patch.msgid.link/20250313-net-next-mptcp-pm-ops-intro-v1-0-f4e4a88efc50@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
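[Editor's note] Patches 6 and 7 add the mptcp_pm_ops registration API that appears in the diff below (mptcp_pm_register(), mptcp_pm_unregister(), mptcp_pm_find()). As a rough, hypothetical illustration only: a minimal path manager registering itself. Only the .name field and the register/unregister helpers come from this patch; the "example" name, the init/exit wrappers, and the protocol.h declaration site are assumptions.

#include "protocol.h"	/* assumed to declare struct mptcp_pm_ops and mptcp_pm_register() */

/* Hypothetical sketch, not part of this series: an additional path manager
 * registering itself with the new ops API. Only .name (plus the internal
 * .list node managed by the core) is visible in this diff; any further
 * mptcp_pm_ops callbacks are out of scope here.
 */
static struct mptcp_pm_ops mptcp_pm_example = {
	.name = "example",
};

static int __init mptcp_pm_example_init(void)
{
	/* fails with -EEXIST if a PM named "example" is already registered */
	return mptcp_pm_register(&mptcp_pm_example);
}

static void mptcp_pm_example_exit(void)
{
	/* unregistering is refused (WARN + return) only for the default in-kernel PM */
	mptcp_pm_unregister(&mptcp_pm_example);
}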
Diffstat (limited to 'net/mptcp/pm.c')
-rw-r--r--	net/mptcp/pm.c | 97
1 file changed, 81 insertions(+), 16 deletions(-)
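[Editor's note] Patch 5, visible in the mptcp_pm_data_reset() hunk below, replaces four per-field assignments with a single memset() over a struct_group(). A generic sketch of that pattern follows, with illustrative names; the real group lives in struct mptcp_pm_data in protocol.h and likely covers more fields than the four shown here.

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>	/* memset() */
#include <linux/types.h>	/* u8 */

/* Illustrative only: struct_group() wraps consecutive members in an
 * anonymous union so they keep their normal layout but can also be
 * addressed, and cleared, as one named sub-struct.
 */
struct example_pm_data {
	int	something_persistent;	/* not touched by the reset below */
	struct_group(reset,
		u8	add_addr_signaled;
		u8	add_addr_accepted;
		u8	local_addr_used;
		u8	subflows;
	);
};

static void example_pm_data_reset(struct example_pm_data *pm)
{
	/* clears exactly the members inside the "reset" group */
	memset(&pm->reset, 0, sizeof(pm->reset));
}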
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 833839d7286e..18b19dbccbba 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -5,6 +5,8 @@
  */
 #define pr_fmt(fmt) "MPTCP: " fmt
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
 #include "protocol.h"
 #include "mib.h"
@@ -18,6 +20,9 @@ struct mptcp_pm_add_entry {
 	struct mptcp_sock	*sock;
 };
+static DEFINE_SPINLOCK(mptcp_pm_list_lock);
+static LIST_HEAD(mptcp_pm_list);
+
 /* path manager helpers */
 /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses,
@@ -511,13 +516,13 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
 	 * be sure to serve this event only once.
 	 */
 	if (READ_ONCE(pm->work_pending) &&
-	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
+	    !(pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
 		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
-	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
+	if ((pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
 		announce = true;
-	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
+	pm->status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
 	spin_unlock_bh(&pm->lock);
 	if (announce)
@@ -978,10 +983,7 @@ void mptcp_pm_data_reset(struct mptcp_sock *msk)
 	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
 	struct mptcp_pm_data *pm = &msk->pm;
-	pm->add_addr_signaled = 0;
-	pm->add_addr_accepted = 0;
-	pm->local_addr_used = 0;
-	pm->subflows = 0;
+	memset(&pm->reset, 0, sizeof(pm->reset));
 	pm->rm_list_tx.nr = 0;
 	pm->rm_list_rx.nr = 0;
 	WRITE_ONCE(pm->pm_type, pm_type);
@@ -1000,16 +1002,9 @@ void mptcp_pm_data_reset(struct mptcp_sock *msk)
 			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
 			   subflows_allowed);
 		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
-	} else {
-		WRITE_ONCE(pm->work_pending, 0);
-		WRITE_ONCE(pm->accept_addr, 0);
-		WRITE_ONCE(pm->accept_subflow, 0);
-	}
-	WRITE_ONCE(pm->addr_signal, 0);
-	WRITE_ONCE(pm->remote_deny_join_id0, false);
-	pm->status = 0;
-	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+		bitmap_fill(pm->id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+	}
 }
 void mptcp_pm_data_init(struct mptcp_sock *msk)
@@ -1022,5 +1017,75 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
 void __init mptcp_pm_init(void)
 {
+	mptcp_pm_kernel_register();
+	mptcp_pm_userspace_register();
 	mptcp_pm_nl_init();
 }
+
+/* Must be called with rcu read lock held */
+struct mptcp_pm_ops *mptcp_pm_find(const char *name)
+{
+	struct mptcp_pm_ops *pm_ops;
+
+	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
+		if (!strcmp(pm_ops->name, name))
+			return pm_ops;
+	}
+
+	return NULL;
+}
+
+int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops)
+{
+	return 0;
+}
+
+int mptcp_pm_register(struct mptcp_pm_ops *pm_ops)
+{
+	int ret;
+
+	ret = mptcp_pm_validate(pm_ops);
+	if (ret)
+		return ret;
+
+	spin_lock(&mptcp_pm_list_lock);
+	if (mptcp_pm_find(pm_ops->name)) {
+		spin_unlock(&mptcp_pm_list_lock);
+		return -EEXIST;
+	}
+	list_add_tail_rcu(&pm_ops->list, &mptcp_pm_list);
+	spin_unlock(&mptcp_pm_list_lock);
+
+	pr_debug("%s registered\n", pm_ops->name);
+	return 0;
+}
+
+void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops)
+{
+	/* skip unregistering the default path manager */
+	if (WARN_ON_ONCE(pm_ops == &mptcp_pm_kernel))
+		return;
+
+	spin_lock(&mptcp_pm_list_lock);
+	list_del_rcu(&pm_ops->list);
+	spin_unlock(&mptcp_pm_list_lock);
+}
+
+/* Build string with list of available path manager values.
+ * Similar to tcp_get_available_congestion_control()
+ */
+void mptcp_pm_get_available(char *buf, size_t maxlen)
+{
+	struct mptcp_pm_ops *pm_ops;
+	size_t offs = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
+		offs += snprintf(buf + offs, maxlen - offs, "%s%s",
+				 offs == 0 ? "" : " ", pm_ops->name);
+
+		if (WARN_ON_ONCE(offs >= maxlen))
+			break;
+	}
+	rcu_read_unlock();
+}
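[Editor's note] A usage note on the lookup helper above: mptcp_pm_find() only walks the RCU-protected list, so callers need the RCU read lock, matching the list_add_tail_rcu()/list_del_rcu() writers serialized by mptcp_pm_list_lock. A small, hypothetical caller (not part of this patch) might look like this:

/* Hypothetical helper, not in this patch: check whether a path manager
 * with the given name is currently registered. Assumes the usual pm.c
 * includes (rculist.h provides the RCU read-lock primitives here).
 */
static bool mptcp_pm_example_is_available(const char *name)
{
	bool found;

	rcu_read_lock();
	found = mptcp_pm_find(name) != NULL;
	rcu_read_unlock();

	return found;
}

Per the cover letter, the new net.mptcp.path_manager and net.mptcp.available_path_managers knobs are the intended consumers of mptcp_pm_find() and mptcp_pm_get_available(), mirroring how TCP exposes its congestion control modules.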