author		Sohil Mehta <sohil.mehta@intel.com>	2025-03-28 02:46:26 +0300
committer	Ingo Molnar <mingo@kernel.org>	2025-04-01 23:26:16 +0300
commit		59cddd397accfa92fc36c223e3d532b47fdab841 (patch)
tree		fb05ffe2ebe3be54f0f424a3d2144f22d191b4b0
parent		b4bc3144c1eca9107f45018000a1e68bfd308d8c (diff)
x86/nmi: Improve and relocate NMI handler comments
Some of the comments in the default NMI handling code are out of place
or inadequate. Move them to the appropriate locations and update them
as needed.

Move the comment related to CPU-specific NMIs closer to the actual
code. Also, add more details about how back-to-back NMIs are detected,
since that isn't immediately obvious.

Opportunistically, replace an #ifdef section in the vicinity with an
IS_ENABLED() check to make the code easier to read.

Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Kai Huang <kai.huang@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
Link: https://lore.kernel.org/r/20250327234629.3953536-7-sohil.mehta@intel.com
 arch/x86/kernel/nmi.c | 35
 1 file changed, 21 insertions(+), 14 deletions(-)
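The #ifdef-to-IS_ENABLED() conversion mentioned in the message keeps the guarded code visible to the compiler in every configuration, so it stays parsed and type-checked even when it is compiled out as dead code. Below is a minimal standalone sketch of the pattern, assuming a simplified stand-in for the kernel's IS_ENABLED() macro from <linux/kconfig.h> (the real macro expands Kconfig symbols via token pasting), and a hypothetical reassert_nmi() stub:

#include <stdio.h>

/*
 * Simplified stand-in for the kernel's IS_ENABLED(); the real macro
 * in <linux/kconfig.h> detects whether a Kconfig symbol is defined
 * rather than evaluating a plain constant like this.
 */
#define CONFIG_X86_32 1		/* pretend this Kconfig option is set */
#define IS_ENABLED(option) (option)

/* Hypothetical stub standing in for the kernel's reassert_nmi(). */
static void reassert_nmi(void)
{
	printf("NMI reasserted\n");
}

int main(void)
{
	/*
	 * Unlike an #ifdef block, this branch is compiled in every
	 * configuration; when the option evaluates to 0, the compiler
	 * simply eliminates it as dead code.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		reassert_nmi();

	return 0;
}

A side benefit of this style is that builds with the option disabled still catch compile errors in the guarded code, so rarely built configurations bit-rot less.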
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 2a07c9adc6a6..59ed74ec010e 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -359,17 +359,18 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
bool b2b = false;
/*
- * CPU-specific NMI must be processed before non-CPU-specific
- * NMI, otherwise we may lose it, because the CPU-specific
- * NMI can not be detected/processed on other CPUs.
- */
-
- /*
- * Back-to-back NMIs are interesting because they can either
- * be two NMI or more than two NMIs (any thing over two is dropped
- * due to NMI being edge-triggered). If this is the second half
- * of the back-to-back NMI, assume we dropped things and process
- * more handlers. Otherwise reset the 'swallow' NMI behaviour
+ * Back-to-back NMIs are detected by comparing the RIP of the
+ * current NMI with that of the previous NMI. If it is the same,
+ * it is assumed that the CPU did not have a chance to jump back
+ * into a non-NMI context and execute code in between the two
+ * NMIs.
+ *
+ * They are interesting because even if there are more than two,
+ * only a maximum of two can be detected (anything over two is
+ * dropped due to NMI being edge-triggered). If this is the
+ * second half of the back-to-back NMI, assume we dropped things
+ * and process more handlers. Otherwise, reset the 'swallow' NMI
+ * behavior.
*/
if (regs->ip == __this_cpu_read(last_nmi_rip))
b2b = true;
@@ -383,6 +384,11 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
goto out;
+ /*
+ * CPU-specific NMI must be processed before non-CPU-specific
+ * NMI, otherwise we may lose it, because the CPU-specific
+ * NMI can not be detected/processed on other CPUs.
+ */
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -419,13 +425,14 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
+
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
- reassert_nmi();
-#endif
+ if (IS_ENABLED(CONFIG_X86_32))
+ reassert_nmi();
+
__this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
goto out;
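As the relocated comment explains, back-to-back NMIs are inferred by comparing the RIP at which the current NMI interrupted execution with the RIP saved by the previous NMI: a match means the CPU never returned to non-NMI context in between. A minimal user-space sketch of that idea, with a plain static standing in for the kernel's per-CPU last_nmi_rip and the caller passing in what pt_regs->ip would hold (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU last_nmi_rip variable in nmi.c. */
static uint64_t last_nmi_rip;

/*
 * If the interrupted RIP matches the previous NMI's RIP, the CPU had
 * no chance to execute non-NMI code in between, so treat this as the
 * second half of a back-to-back pair and assume a further NMI may
 * have been dropped (NMIs are edge-triggered, so anything beyond two
 * pending NMIs is lost).
 */
static bool nmi_is_back_to_back(uint64_t rip)
{
	bool b2b = (rip == last_nmi_rip);

	last_nmi_rip = rip;
	return b2b;
}

int main(void)
{
	/* Two NMIs hitting the same instruction look back-to-back. */
	printf("first:  %d\n", nmi_is_back_to_back(0x1000));	/* 0 */
	printf("second: %d\n", nmi_is_back_to_back(0x1000));	/* 1 */
	printf("third:  %d\n", nmi_is_back_to_back(0x2000));	/* 0 */
	return 0;
}

In the real handler, a positive match sets b2b and makes the code run more handlers on the assumption that an NMI was swallowed; the kernel also resets last_nmi_rip elsewhere, which this sketch omits.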