path: root/arch/x86/lib
author	Borislav Petkov (AMD) <bp@alien8.de>	2023-06-28 12:02:39 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-08-08 20:57:40 +0300
commit	3f9b7101bea1dcb63410c016ceb266f6e9f733c9 (patch)
tree	26d36e4a2eab4d30187fe63be3a9a408ceea3ad1 /arch/x86/lib
parent	34f23ba8a399ecd38b45c84da257b91d278e88aa (diff)
download	linux-3f9b7101bea1dcb63410c016ceb266f6e9f733c9.tar.xz
x86/srso: Add a Speculative RAS Overflow mitigation
Upstream commit: fb3bd914b3ec28f5fb697ac55c4846ac2d542855

Add a mitigation for the speculative return address stack overflow vulnerability found on AMD processors. The mitigation works by ensuring all RET instructions speculate to a controlled location, similar to how speculation is controlled in the retpoline sequence. To accomplish this, the __x86_return_thunk forces the CPU to mispredict every function return using a 'safe return' sequence.

To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the untraining function srso_untrain_ret_alias() and the safe return function srso_safe_ret_alias(), which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns.

In older Zen1 and Zen2, this is accomplished using a reinterpretation technique similar to the Retbleed one: srso_untrain_ret() and srso_safe_ret().

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
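[Editor's note, for illustration only; not part of the patch.] A minimal user-space C sketch of the Zen3/4 address-layout constraint described above and spelled out in the retpoline.S comment in the diff below: srso_untrain_ret_alias() is 2M-aligned and srso_safe_ret_alias() sits in the same 2M page with bits 2, 8, 14 and 20 of its virtual address set, so the two RET sites alias in the branch target buffer. The addresses and the checking helper here are hypothetical; in the kernel the constraint is enforced by the vmlinux linker script, not by runtime code.

#include <stdint.h>
#include <stdio.h>

#define SRSO_ALIAS_MASK	((1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20))
#define PMD_SIZE	(2ULL * 1024 * 1024)	/* one 2M page */

/* Hypothetical check mirroring the layout the mitigation relies on. */
static int srso_alias_layout_ok(uint64_t untrain_va, uint64_t safe_va)
{
	return !(untrain_va & (PMD_SIZE - 1)) &&		/* 2M aligned         */
	       (safe_va & ~(PMD_SIZE - 1)) == untrain_va &&	/* same 2M page       */
	       (safe_va & SRSO_ALIAS_MASK) == SRSO_ALIAS_MASK;	/* alias bits all set */
}

int main(void)
{
	uint64_t untrain = 0xffffffff82000000ULL;	/* made-up, 2M-aligned VA          */
	uint64_t safe    = untrain | SRSO_ALIAS_MASK;	/* differs only in the alias bits  */

	printf("BTB-alias layout ok: %d\n", srso_alias_layout_ok(untrain, safe));
	return 0;
}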
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/retpoline.S	81
1 file changed, 77 insertions, 4 deletions
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 1221bb099afb..5f7eed97487e 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -9,6 +9,7 @@
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>
+#include <asm/nops.h>
.section .text.__x86.indirect_thunk
@@ -73,6 +74,45 @@ SYM_CODE_END(__x86_indirect_thunk_array)
*/
#ifdef CONFIG_RETHUNK
+/*
+ * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * special addresses:
+ *
+ * - srso_untrain_ret_alias() is 2M aligned
+ * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * and 20 in its virtual address are set (while those bits in the
+ * srso_untrain_ret_alias() function are cleared).
+ *
+ * This guarantees that those two addresses will alias in the branch
+ * target buffer of Zen3/4 generations, leading to any potential
+ * poisoned entries at that BTB slot to get evicted.
+ *
+ * As a result, srso_safe_ret_alias() becomes a safe return.
+ */
+#ifdef CONFIG_CPU_SRSO
+ .section .text.__x86.rethunk_untrain
+
+SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+ ASM_NOP2
+ lfence
+ jmp __x86_return_thunk
+SYM_FUNC_END(srso_untrain_ret_alias)
+__EXPORT_THUNK(srso_untrain_ret_alias)
+
+ .section .text.__x86.rethunk_safe
+#endif
+
+/* Needs a definition for the __x86_return_thunk alternative below. */
+SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+#ifdef CONFIG_CPU_SRSO
+ add $8, %_ASM_SP
+ UNWIND_HINT_FUNC
+#endif
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(srso_safe_ret_alias)
+
.section .text.__x86.return_thunk
/*
@@ -85,7 +125,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
* from re-poisoning the BTB prediction.
*/
.align 64
- .skip 63, 0xcc
+ .skip 64 - (__ret - zen_untrain_ret), 0xcc
SYM_FUNC_START_NOALIGN(zen_untrain_ret);
/*
@@ -117,10 +157,10 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
* evicted, __x86_return_thunk will suffer Straight Line Speculation
* which will be contained safely by the INT3.
*/
-SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
+SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
ret
int3
-SYM_CODE_END(__x86_return_thunk)
+SYM_CODE_END(__ret)
/*
* Ensure the TEST decoding / BTB invalidation is complete.
@@ -131,11 +171,44 @@ SYM_CODE_END(__x86_return_thunk)
* Jump back and execute the RET in the middle of the TEST instruction.
* INT3 is for SLS protection.
*/
- jmp __x86_return_thunk
+ jmp __ret
int3
SYM_FUNC_END(zen_untrain_ret)
__EXPORT_THUNK(zen_untrain_ret)
+/*
+ * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccccc308c48348,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+ .align 64
+ .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ .byte 0x48, 0xb8
+
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+ add $8, %_ASM_SP
+ ret
+ int3
+ int3
+ int3
+ lfence
+ call srso_safe_ret
+ int3
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+__EXPORT_THUNK(srso_untrain_ret)
+
+SYM_FUNC_START(__x86_return_thunk)
+ ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+ int3
+SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_RETHUNK */
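
[Editor's note, for illustration only; not part of the patch.] The Zen1/2 srso_untrain_ret() sequence added above relies on the same bytes decoding differently depending on the entry point: entered at srso_untrain_ret, the bytes 48 b8 start a MOVABS whose 8-byte immediate (0xccccccc308c48348) swallows what follows, while entered two bytes later at srso_safe_ret those immediate bytes decode as ADD $8,%rsp; RET; INT3; INT3; INT3. The user-space C program below only reassembles the immediate from the raw bytes to show the overlap; it does not execute anything.

#include <stdio.h>

/* The overlapping byte sequence from srso_untrain_ret/srso_safe_ret above. */
static const unsigned char srso_bytes[] = {
	0x48, 0xb8,		/* srso_untrain_ret: movabs $imm64,%rax */
	0x48, 0x83, 0xc4, 0x08,	/* srso_safe_ret:    add    $0x8,%rsp   */
	0xc3,			/*                   ret                */
	0xcc, 0xcc, 0xcc,	/*                   int3; int3; int3   */
};

int main(void)
{
	unsigned long long imm = 0;
	int i;

	/* Reassemble the little-endian 8-byte immediate seen by the MOVABS view. */
	for (i = 0; i < 8; i++)
		imm |= (unsigned long long)srso_bytes[2 + i] << (8 * i);

	printf("movabs immediate: %#llx\n", imm);	/* 0xccccccc308c48348 */
	return 0;
}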
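
[Editor's note, for illustration only; not part of the patch.] A user-space sketch of the selection the ALTERNATIVE_2 in __x86_return_thunk makes at patch time: the default body is "jmp __ret" (the existing Retbleed thunk), replaced by "call srso_safe_ret" when X86_FEATURE_SRSO is set (Zen1/2) or by "call srso_safe_ret_alias" when X86_FEATURE_SRSO_ALIAS is set (Zen3/4). The bools and helper below are hypothetical stand-ins for the kernel's cpu-feature and alternatives machinery.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the alternatives patching decision. */
static const char *pick_return_thunk(bool feat_srso, bool feat_srso_alias)
{
	if (feat_srso_alias)			/* Zen3/4: BTB-aliased safe return    */
		return "call srso_safe_ret_alias";
	if (feat_srso)				/* Zen1/2: safe RET inside the MOVABS */
		return "call srso_safe_ret";
	return "jmp __ret";			/* default: existing Retbleed thunk   */
}

int main(void)
{
	printf("%s\n", pick_return_thunk(false, true));	/* -> call srso_safe_ret_alias */
	return 0;
}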