diff options
 include/linux/randomize_kstack.h | 26 +++++++++++++++-----------
 include/linux/sched.h            |  4 ++++
 init/main.c                      |  1 -
 kernel/fork.c                    |  2 ++
 4 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index 1d982dbdd0d0..5d3916ca747c 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -9,7 +9,6 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, randomize_kstack_offset); -DECLARE_PER_CPU(u32, kstack_offset); /* * Do not use this anywhere else in the kernel. This is used here because @@ -50,15 +49,14 @@ DECLARE_PER_CPU(u32, kstack_offset); * add_random_kstack_offset - Increase stack utilization by previously * chosen random offset * - * This should be used in the syscall entry path when interrupts and - * preempt are disabled, and after user registers have been stored to - * the stack. For testing the resulting entropy, please see: - * tools/testing/selftests/lkdtm/stack-entropy.sh + * This should be used in the syscall entry path after user registers have been + * stored to the stack. Preemption may be enabled. For testing the resulting + * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh */ #define add_random_kstack_offset() do { \ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ &randomize_kstack_offset)) { \ - u32 offset = raw_cpu_read(kstack_offset); \ + u32 offset = current->kstack_offset; \ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \ /* Keep allocation even after "ptr" loses scope. */ \ asm volatile("" :: "r"(ptr) : "memory"); \ @@ -69,9 +67,9 @@ DECLARE_PER_CPU(u32, kstack_offset); * choose_random_kstack_offset - Choose the random offset for the next * add_random_kstack_offset() * - * This should only be used during syscall exit when interrupts and - * preempt are disabled. This position in the syscall flow is done to - * frustrate attacks from userspace attempting to learn the next offset: + * This should only be used during syscall exit. Preemption may be enabled. 
This + * position in the syscall flow is done to frustrate attacks from userspace + * attempting to learn the next offset: * - Maximize the timing uncertainty visible from userspace: if the * offset is chosen at syscall entry, userspace has much more control * over the timing between choosing offsets. "How long will we be in @@ -85,14 +83,20 @@ DECLARE_PER_CPU(u32, kstack_offset); #define choose_random_kstack_offset(rand) do { \ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ &randomize_kstack_offset)) { \ - u32 offset = raw_cpu_read(kstack_offset); \ + u32 offset = current->kstack_offset; \ offset = ror32(offset, 5) ^ (rand); \ - raw_cpu_write(kstack_offset, offset); \ + current->kstack_offset = offset; \ } \ } while (0) + +static inline void random_kstack_task_init(struct task_struct *tsk) +{ + tsk->kstack_offset = 0; +} #else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ #define add_random_kstack_offset() do { } while (0) #define choose_random_kstack_offset(rand) do { } while (0) +#define random_kstack_task_init(tsk) do { } while (0) #endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index a7b4a980eb2f..8358e430dd7f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1592,6 +1592,10 @@ struct task_struct { unsigned long prev_lowest_stack; #endif +#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET + u32 kstack_offset; +#endif + #ifdef CONFIG_X86_MCE void __user *mce_vaddr; __u64 mce_kflags; diff --git a/init/main.c b/init/main.c index 1cb395dd94e4..0a1d8529212e 100644 --- a/init/main.c +++ b/init/main.c @@ -833,7 +833,6 @@ static inline void initcall_debug_enable(void) #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, randomize_kstack_offset); -DEFINE_PER_CPU(u32, kstack_offset); static int __init early_randomize_kstack_offset(char *buf) { diff --git a/kernel/fork.c b/kernel/fork.c index 65113a304518..5715adeb6adf 100644 --- a/kernel/fork.c 
+++ b/kernel/fork.c @@ -95,6 +95,7 @@ #include <linux/thread_info.h> #include <linux/kstack_erase.h> #include <linux/kasan.h> +#include <linux/randomize_kstack.h> #include <linux/scs.h> #include <linux/io_uring.h> #include <linux/io_uring_types.h> @@ -2233,6 +2234,7 @@ __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cleanup_io; + random_kstack_task_init(p); stackleak_task_init(p); if (pid != &init_struct_pid) {
