summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Johansen <john.johansen@canonical.com>2026-01-21 05:18:51 +0300
committerJohn Johansen <john.johansen@canonical.com>2026-01-29 12:27:55 +0300
commit0b6a6b72b329c34a13a13908d5e8df9fb46eaa1f (patch)
treefb6a28ea756a64d33849de444c5e3d57e3c820ee
parent640cf2f09575c9dc344b3f7be2498d31e3923ead (diff)
downloadlinux-0b6a6b72b329c34a13a13908d5e8df9fb46eaa1f.tar.xz
apparmor: document the buffer hold, add an overflow guard
The buffer hold is a measure of contention, but it is tracked per cpu where the lock is a global resource. On some systems (eg. real time) there is no guarantee that the code will be on the same cpu before and after spinlock acquisition, nor that the buffer will be put back to the same percpu cache when we are done with it. Because of this the hold value can move asynchronously to the buffers on the cache, meaning it is possible to underflow, and potentially in really pathological cases overflow. Reviewed-by: Georgia Garcia <georgia.garcia@canonical.com> Signed-off-by: John Johansen <john.johansen@canonical.com>
-rw-r--r--security/apparmor/lsm.c28
1 file changed, 26 insertions, 2 deletions
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 9175fd677ef3..34fc458887d7 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -2125,6 +2125,23 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
return 0;
}
/* arbitrary cap on how long to hold a buffer because contention was
 * encountered before trying to put it back into the global pool
 */
#define MAX_HOLD_COUNT 64

/* The hold count is a heuristic for lock contention, and can be
 * incremented async to actual buffer alloc/free. Because buffers
 * may be put back onto a percpu cache different than the one the
 * ->hold was added to, the counts can be out of sync. Guard against
 * underflow and overflow.
 */
static void cache_hold_inc(unsigned int *hold)
{
	/* Only bump the count while it is below the cap so the counter
	 * can never wrap.  Note the comparison must be '<': testing
	 * '> MAX_HOLD_COUNT' would prevent the count from ever leaving
	 * zero and would allow unbounded growth once past the cap —
	 * the opposite of an overflow guard.
	 */
	if (*hold < MAX_HOLD_COUNT)
		(*hold)++;
}
+
char *aa_get_buffer(bool in_atomic)
{
union aa_buffer *aa_buf;
@@ -2143,11 +2160,18 @@ char *aa_get_buffer(bool in_atomic)
put_cpu_ptr(&aa_local_buffers);
return &aa_buf->buffer[0];
}
+ /* exit percpu as spinlocks may sleep on realtime kernels */
put_cpu_ptr(&aa_local_buffers);
if (!spin_trylock(&aa_buffers_lock)) {
+ /* had contention on lock so increase hold count. Doesn't
+ * really matter if recorded before or after the spin lock
+ * as there is no way to guarantee the buffer will be put
+ * back on the same percpu cache. Instead rely on holds
+ * roughly averaging out over time.
+ */
cache = get_cpu_ptr(&aa_local_buffers);
- cache->hold += 1;
+ cache_hold_inc(&cache->hold);
put_cpu_ptr(&aa_local_buffers);
spin_lock(&aa_buffers_lock);
} else {
@@ -2213,7 +2237,7 @@ void aa_put_buffer(char *buf)
}
/* contention on global list, fallback to percpu */
cache = get_cpu_ptr(&aa_local_buffers);
- cache->hold += 1;
+ cache_hold_inc(&cache->hold);
}
/* cache in percpu list */