| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-14 03:52:29 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-14 03:52:29 +0300 |
| commit | d568788baab24875604c231f723dbb72387fb081 (patch) | |
| tree | 0921e372d643541c59751e1af47b20fc1b702204 | /include |
| parent | cea4a90faf9e5d15aee1fd01883bc81ad7640260 (diff) | |
| parent | cf2f06f7152d5e38a87aa2e9b8b452714789f6ba (diff) | |
| download | linux-d568788baab24875604c231f723dbb72387fb081.tar.xz | |
Merge tag 'hardening-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardening updates from Kees Cook:
- randomize_kstack: Improve implementation across arches (Ryan Roberts)
- lkdtm/fortify: Drop unneeded FORTIFY_STR_OBJECT test
- refcount: Remove unused __signed_wrap function annotations
* tag 'hardening-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
lkdtm/fortify: Drop unneeded FORTIFY_STR_OBJECT test
refcount: Remove unused __signed_wrap function annotations
randomize_kstack: Unify random source across arches
randomize_kstack: Maintain kstack_offset per task
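
The shape of the change is easy to demonstrate outside the kernel. Below is a minimal userspace sketch (not kernel code) of the new entry-time scheme shown in the diff further down: draw a fresh pseudo-random value on each syscall entry and mask it down to the permitted range before handing it to alloca(). Here xorshift32() is only a stand-in for the kernel's prandom_u32_state(), and the 0x3FC mask matches the diff's default KSTACK_OFFSET_MAX() of ((x) & 0b1111111100), i.e. bits 2-9, offsets of up to 1020 bytes in 4-byte steps.

```c
/*
 * Userspace sketch only: xorshift32() stands in for the kernel's
 * per-CPU prandom_u32_state(); the mask mirrors the default
 * KSTACK_OFFSET_MAX() from the diff below.
 */
#include <alloca.h>
#include <stdint.h>
#include <stdio.h>

#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FC) /* keep bits 2-9: 0..1020 bytes */

static uint32_t rnd_state = 0x2545F491; /* stand-in for per-CPU rnd_state */

static uint32_t xorshift32(void)
{
	rnd_state ^= rnd_state << 13;
	rnd_state ^= rnd_state >> 17;
	rnd_state ^= rnd_state << 5;
	return rnd_state;
}

static void simulated_syscall_entry(int nr)
{
	/* Draw a fresh offset at entry, as the new macro does. */
	uint32_t offset = KSTACK_OFFSET_MAX(xorshift32());
	uint8_t *ptr = alloca(offset);

	/* Keep the allocation alive, as in add_random_kstack_offset(). */
	asm volatile("" :: "r"(ptr) : "memory");

	printf("syscall %d: stack shifted by %u bytes\n", nr, (unsigned)offset);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		simulated_syscall_entry(i);
	return 0;
}
```

Compared with the removed choose_random_kstack_offset() rotate-xor accumulator, drawing from a self-contained PRNG state at entry removes the need for a separate update step at syscall exit.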
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/compiler_types.h | 9 |
| -rw-r--r-- | include/linux/randomize_kstack.h | 54 |
| -rw-r--r-- | include/linux/refcount.h | 10 |

3 files changed, 26 insertions(+), 47 deletions(-)
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 890076d0974b..e8fd77593b68 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -432,18 +432,11 @@ struct ftrace_likely_data {
 #define at_least
 #endif
 
-/* Do not trap wrapping arithmetic within an annotated function. */
-#ifdef CONFIG_UBSAN_INTEGER_WRAP
-# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
-#else
-# define __signed_wrap
-#endif
-
 /* Section for code which can't be instrumented at all */
 #define __noinstr_section(section)					\
 	noinline notrace __attribute((__section__(section)))		\
 	__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
-	__no_sanitize_memory __signed_wrap
+	__no_sanitize_memory
 
 #define noinstr __noinstr_section(".noinstr.text")
 
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 1d982dbdd0d0..024fc20e7762 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -6,10 +6,10 @@
 #include <linux/kernel.h>
 #include <linux/jump_label.h>
 #include <linux/percpu-defs.h>
+#include <linux/prandom.h>
 
 DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
 			 randomize_kstack_offset);
-DECLARE_PER_CPU(u32, kstack_offset);
 
 /*
  * Do not use this anywhere else in the kernel. This is used here because
 *
@@ -46,53 +46,39 @@ DECLARE_PER_CPU(u32, kstack_offset);
 #define KSTACK_OFFSET_MAX(x)	((x) & 0b1111111100)
 #endif
 
+DECLARE_PER_CPU(struct rnd_state, kstack_rnd_state);
+
+static __always_inline u32 get_kstack_offset(void)
+{
+	struct rnd_state *state;
+	u32 rnd;
+
+	state = &get_cpu_var(kstack_rnd_state);
+	rnd = prandom_u32_state(state);
+	put_cpu_var(kstack_rnd_state);
+
+	return rnd;
+}
+
 /**
- * add_random_kstack_offset - Increase stack utilization by previously
- *			      chosen random offset
+ * add_random_kstack_offset - Increase stack utilization by a random offset.
 *
- * This should be used in the syscall entry path when interrupts and
- * preempt are disabled, and after user registers have been stored to
- * the stack. For testing the resulting entropy, please see:
- * tools/testing/selftests/lkdtm/stack-entropy.sh
+ * This should be used in the syscall entry path after user registers have been
+ * stored to the stack. Preemption may be enabled. For testing the resulting
+ * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
 */
 #define add_random_kstack_offset() do {					\
 	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
 				&randomize_kstack_offset)) {		\
-		u32 offset = raw_cpu_read(kstack_offset);		\
+		u32 offset = get_kstack_offset();			\
 		u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset));	\
 		/* Keep allocation even after "ptr" loses scope. */	\
 		asm volatile("" :: "r"(ptr) : "memory");		\
 	}								\
 } while (0)
 
-/**
- * choose_random_kstack_offset - Choose the random offset for the next
- *				 add_random_kstack_offset()
- *
- * This should only be used during syscall exit when interrupts and
- * preempt are disabled. This position in the syscall flow is done to
- * frustrate attacks from userspace attempting to learn the next offset:
- * - Maximize the timing uncertainty visible from userspace: if the
- *   offset is chosen at syscall entry, userspace has much more control
- *   over the timing between choosing offsets. "How long will we be in
- *   kernel mode?" tends to be more difficult to predict than "how long
- *   will we be in user mode?"
- * - Reduce the lifetime of the new offset sitting in memory during
- *   kernel mode execution. Exposure of "thread-local" memory content
- *   (e.g. current, percpu, etc) tends to be easier than arbitrary
- *   location memory exposure.
- */
-#define choose_random_kstack_offset(rand) do {				\
-	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
-				&randomize_kstack_offset)) {		\
-		u32 offset = raw_cpu_read(kstack_offset);		\
-		offset = ror32(offset, 5) ^ (rand);			\
-		raw_cpu_write(kstack_offset, offset);			\
-	}								\
-} while (0)
 #else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
 #define add_random_kstack_offset() do { } while (0)
-#define choose_random_kstack_offset(rand) do { } while (0)
 #endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
 
 #endif
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 3da377ffb0c2..ba7657ced281 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -170,7 +170,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
-static inline __must_check __signed_wrap
+static inline __must_check
 bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
 {
 	int old = refcount_read(r);
@@ -212,7 +212,7 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 	return __refcount_add_not_zero(i, r, NULL);
 }
 
-static inline __must_check __signed_wrap
+static inline __must_check
 bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
 					     int limit)
 {
@@ -244,7 +244,7 @@ __refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
 	return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
 }
 
-static inline __must_check __signed_wrap
+static inline __must_check
 bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
 {
 	return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
@@ -277,7 +277,7 @@ static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t
 	return __refcount_add_not_zero_acquire(i, r, NULL);
 }
 
-static inline __signed_wrap
+static inline
 void __refcount_add(int i, refcount_t *r, int *oldp)
 {
 	int old = atomic_fetch_add_relaxed(i, &r->refs);
@@ -383,7 +383,7 @@ static inline void refcount_inc(refcount_t *r)
 	__refcount_inc(r, NULL);
 }
 
-static inline __must_check __signed_wrap
+static inline __must_check
 bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
 {
 	int old = atomic_fetch_sub_release(i, &r->refs);
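
One piece this include/-only view cannot show is where the new per-CPU kstack_rnd_state is seeded; that code lives elsewhere in the series. As a purely hypothetical sketch of what such seeding could look like: the helpers prandom_seed_state(), get_random_u64(), and for_each_possible_cpu() are real kernel APIs, but this init routine is illustrative and is not the patch's actual code.

```c
/*
 * Illustrative only: one plausible boot-time seeding of the per-CPU
 * PRNG state declared in randomize_kstack.h. The series' real seeding
 * code is outside include/ and may differ (e.g. in when it runs
 * relative to CRNG readiness).
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prandom.h>
#include <linux/random.h>

DEFINE_PER_CPU(struct rnd_state, kstack_rnd_state);

static int __init kstack_rnd_seed(void)
{
	int cpu;

	/* Give each CPU an independent starting state from the CRNG. */
	for_each_possible_cpu(cpu)
		prandom_seed_state(per_cpu_ptr(&kstack_rnd_state, cpu),
				   get_random_u64());
	return 0;
}
core_initcall(kstack_rnd_seed);
```

The design point this sketch captures: the CRNG is touched once per CPU at boot, while the per-syscall draw stays on the cheap prandom_u32_state() fast path.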
