Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2024-26602.patch')
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2024-26602.patch  89
1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2024-26602.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2024-26602.patch
new file mode 100644
index 000000000..b9f68cca6
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2024-26602.patch
@@ -0,0 +1,89 @@
+From 2441a64070b85c14eecc3728cc87e883f953f265 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 4 Feb 2024 15:25:12 +0000
+Subject: sched/membarrier: reduce the ability to hammer on sys_membarrier
+
+commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.
+
+On some systems, sys_membarrier can be very expensive, causing overall
+slowdowns for everything. So put a lock on the path in order to
+serialize the accesses to prevent the ability for this to be called at
+too high of a frequency and saturate the machine.
+
+Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Borislav Petkov <bp@alien8.de>
+Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
+Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ converted to explicit mutex_*() calls - cleanup.h is not in this stable
+ branch - gregkh ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/membarrier.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index b5add64d9698..0b5e3e520bf6 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -161,6 +161,8 @@
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
+ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
+
++static DEFINE_MUTEX(membarrier_ipi_mutex);
++
+ static void ipi_mb(void *info)
+ {
+ smp_mb(); /* IPIs should be serializing but paranoid. */
+@@ -258,6 +260,7 @@ static int membarrier_global_expedited(void)
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
++ mutex_lock(&membarrier_ipi_mutex);
+ cpus_read_lock();
+ rcu_read_lock();
+ for_each_online_cpu(cpu) {
+@@ -303,6 +306,8 @@ static int membarrier_global_expedited(void)
+ * rq->curr modification in scheduler.
+ */
+ smp_mb(); /* exit from system call is not a mb */
++ mutex_unlock(&membarrier_ipi_mutex);
++
+ return 0;
+ }
+
+@@ -346,6 +351,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
+ if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
++ mutex_lock(&membarrier_ipi_mutex);
+ cpus_read_lock();
+
+ if (cpu_id >= 0) {
+@@ -418,6 +424,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
+ * rq->curr modification in scheduler.
+ */
+ smp_mb(); /* exit from system call is not a mb */
++ mutex_unlock(&membarrier_ipi_mutex);
+
+ return 0;
+ }
+@@ -459,6 +466,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+ * between threads which are users of @mm has its membarrier state
+ * updated.
+ */
++ mutex_lock(&membarrier_ipi_mutex);
+ cpus_read_lock();
+ rcu_read_lock();
+ for_each_online_cpu(cpu) {
+@@ -475,6 +483,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+
+ free_cpumask_var(tmpmask);
+ cpus_read_unlock();
++ mutex_unlock(&membarrier_ipi_mutex);
+
+ return 0;
+ }
+--
+2.25.1
+
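The change above is a throttling fix rather than a logic fix: membarrier's expedited commands fan out IPIs to every online CPU, so an unprivileged caller in a tight loop can keep the whole machine saturated. Wrapping each fan-out in one global mutex (membarrier_ipi_mutex) allows only a single broadcast in flight at a time, which bounds the system-wide IPI rate. A minimal user-space sketch of the same serialization pattern, with a pthread mutex standing in for the kernel mutex and a sleep standing in for the per-CPU IPI broadcast (all names below are illustrative, not kernel API):

/* build: gcc -O2 -pthread serialize.c -o serialize */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Analogue of membarrier_ipi_mutex: one broadcast in flight at a time. */
static pthread_mutex_t broadcast_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the per-CPU IPI fan-out; deliberately expensive. */
static void expensive_broadcast(void)
{
	usleep(1000); /* pretend to interrupt every CPU */
}

/* Analogue of a caller hammering on sys_membarrier() in a loop. */
static void *hammer(void *unused)
{
	for (int i = 0; i < 100; i++) {
		pthread_mutex_lock(&broadcast_mutex);
		expensive_broadcast();
		pthread_mutex_unlock(&broadcast_mutex);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[8];

	for (int i = 0; i < 8; i++)
		pthread_create(&threads[i], NULL, hammer, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(threads[i], NULL);

	puts("done: broadcasts ran one at a time, never concurrently");
	return 0;
}

As the bracketed backport note in the commit message says, the upstream commit (944d5fe50f3f) expresses each critical section as guard(mutex)(&membarrier_ipi_mutex); from <linux/cleanup.h>, which unlocks automatically at scope exit. This stable branch predates cleanup.h, so the backport uses explicit mutex_lock()/mutex_unlock() pairs, which is why each hunk must also place the unlock by hand before the function returns.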