From 79cd2a11224eab86d6673fe8a11d2046ae9d2757 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Tue, 11 Jul 2023 11:19:51 +0200
Subject: x86/retpoline,kprobes: Fix position of thunk sections with
 CONFIG_LTO_CLANG

The linker script arch/x86/kernel/vmlinux.lds.S matches the thunk
sections ".text.__x86.*" from arch/x86/lib/retpoline.S as follows:

  .text {
    [...]
    TEXT_TEXT
    [...]
    __indirect_thunk_start = .;
    *(.text.__x86.*)
    __indirect_thunk_end = .;
    [...]
  }

Macro TEXT_TEXT references TEXT_MAIN, which normally expands to only
".text". However, with CONFIG_LTO_CLANG, TEXT_MAIN becomes
".text .text.[0-9a-zA-Z_]*", which wrongly matches the thunk sections
as well. The output layout then differs from what is expected. For
instance, the currently defined range [__indirect_thunk_start,
__indirect_thunk_end] becomes empty.

Prevent the problem by using ".." as the first separator, for example,
".text..__x86.indirect_thunk". This pattern is used by other explicit
section names which start with one of the standard prefixes, such as
".text" or ".data", and which need to be individually selected in the
linker script.

[ nathan: Fix conflicts with SRSO and fold in a fix for the issue
  brought up by Andrew Cooper in post-review:
  https://lore.kernel.org/20230803230323.1478869-1-andrew.cooper3@citrix.com ]

Fixes: dc5723b02e52 ("kbuild: add support for Clang LTO")
Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230711091952.27944-2-petr.pavlu@suse.com
---
 arch/x86/lib/retpoline.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 132cedbf9e57..8db74d811ce2 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -13,7 +13,7 @@
 #include
 #include

-	.section .text.__x86.indirect_thunk
+	.section .text..__x86.indirect_thunk


 .macro POLINE reg
@@ -148,7 +148,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
  * As a result, srso_safe_ret_alias() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
-	.section .text.__x86.rethunk_untrain
+	.section .text..__x86.rethunk_untrain

 SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
@@ -158,7 +158,7 @@ SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
 SYM_FUNC_END(srso_untrain_ret_alias)
 __EXPORT_THUNK(srso_untrain_ret_alias)

-	.section .text.__x86.rethunk_safe
+	.section .text..__x86.rethunk_safe
 #endif

 /* Needs a definition for the __x86_return_thunk alternative below. */
@@ -172,7 +172,7 @@ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
 	int3
 SYM_FUNC_END(srso_safe_ret_alias)

-	.section .text.__x86.return_thunk
+	.section .text..__x86.return_thunk

 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
--
cgit v1.2.3
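
Why the extra dot is enough: in linker-script globbing, "[0-9a-zA-Z_]"
matches exactly one character from that class and "*" matches any run of
characters, so ".text.[0-9a-zA-Z_]*" matches ".text.__x86.indirect_thunk"
('_' is in the class) but not ".text..__x86.indirect_thunk" ('.' is not).
The sketch below is illustrative only and is not part of the patch; the
*(.text..__x86.*) input rule stands in for the corresponding
vmlinux.lds.S update, which is assumed here rather than quoted:

  .text {
    TEXT_TEXT                   /* with CONFIG_LTO_CLANG, expands to
                                 * ".text .text.[0-9a-zA-Z_]*"; after the
                                 * rename it can no longer swallow the
                                 * ".text..__x86.*" thunk sections      */
    __indirect_thunk_start = .;
    *(.text..__x86.*)           /* now the only rule matching the thunks,
                                 * so the symbol range stays non-empty  */
    __indirect_thunk_end = .;
  }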