summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVasant Hegde <vasant.hegde@amd.com>2026-04-20 11:42:03 +0300
committerJoerg Roedel <joerg.roedel@amd.com>2026-04-27 14:49:16 +0300
commit58c0ac6125d89bf6ec65a521eaeb52a0e8e20a9f (patch)
tree5d84ddd63bd54046884a0a1b7b358da126258cb6
parent254f49634ee16a731174d2ae34bc50bd5f45e731 (diff)
downloadlinux-58c0ac6125d89bf6ec65a521eaeb52a0e8e20a9f.tar.xz
iommu/amd: Use maximum Event log buffer size when SNP is enabled on Family 0x19
Due to CVE-2023-20585, the Event log buffer must use the maximum supported size (512K) on Milan/Genoa (Family 0x19) systems when SNP is enabled, to mitigate a potential security vulnerability. All other systems continue to use the default Event log buffer size (8K). Apply the errata fix by making the following changes: * Introduce a new global variable (amd_iommu_evtlog_size) to hold the event log buffer size. Adjust the variable's value for Family 0x19. * Since 'iommu_snp_enable()' must be called after the core IOMMU subsystem is initialized, it cannot be moved to the early init stage. The SNP errata must also be applied after the 'iommu_snp_enable()' check. Therefore, 'alloc_event_buffer()' and 'iommu_enable_event_buffer()' are now called in the IOMMU_ENABLED state, after the errata is applied. * Adjust alloc_event_buffer() and iommu_enable_event_buffer() to handle all IOMMU instances. * Also rename the EVT_* macros to make them more readable. Link: https://www.amd.com/en/resources/product-security/bulletin/amd-sb-3016.html Cc: Borislav Petkov <bp@alien8.de> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Vasant Hegde <vasant.hegde@amd.com> Tested-by: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c110
-rw-r--r--drivers/iommu/amd/iommu.c2
4 files changed, 86 insertions, 38 deletions
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1342e764a548..f1c486dcf0f3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,6 +11,8 @@
#include "amd_iommu_types.h"
+extern int amd_iommu_evtlog_size;
+
irqreturn_t amd_iommu_int_thread(int irq, void *data);
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index c685d3771436..c3430c09bc5c 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/iommufd.h>
@@ -141,7 +142,6 @@
#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
/* event logging constants */
-#define EVENT_ENTRY_SIZE 0x10
#define EVENT_TYPE_SHIFT 28
#define EVENT_TYPE_MASK 0xf
#define EVENT_TYPE_ILL_DEV 0x1
@@ -259,8 +259,12 @@
#define MMIO_CMD_BUFFER_TAIL(x) FIELD_GET(MMIO_CMD_TAIL_MASK, (x))
/* constants for event buffer handling */
-#define EVT_BUFFER_SIZE 8192 /* 512 entries */
-#define EVT_LEN_MASK (0x9ULL << 56)
+#define EVTLOG_ENTRY_SIZE 0x10
+#define EVTLOG_SIZE_SHIFT 56
+#define EVTLOG_SIZE_DEF SZ_8K /* 512 entries */
+#define EVTLOG_LEN_MASK_DEF (0x9ULL << EVTLOG_SIZE_SHIFT)
+#define EVTLOG_SIZE_MAX SZ_512K /* 32K entries */
+#define EVTLOG_LEN_MASK_MAX (0xFULL << EVTLOG_SIZE_SHIFT)
/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES 512
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 56ad020df494..d8dc5c6db29d 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -132,6 +132,8 @@ struct ivhd_entry {
u8 uid;
} __attribute__((packed));
+int amd_iommu_evtlog_size = EVTLOG_SIZE_DEF;
+
/*
* An AMD IOMMU memory definition structure. It defines things like exclusion
* ranges for devices and regions that should be unity mapped.
@@ -865,35 +867,47 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
}
/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_event_buffer(struct amd_iommu *iommu)
+static int __init alloc_event_buffer(void)
{
- iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
- EVT_BUFFER_SIZE);
+ struct amd_iommu *iommu;
- return iommu->evt_buf ? 0 : -ENOMEM;
+ for_each_iommu(iommu) {
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
+ amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
}
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+static void iommu_enable_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 entry;
- BUG_ON(iommu->evt_buf == NULL);
+ for_each_iommu(iommu) {
+ BUG_ON(iommu->evt_buf == NULL);
- if (!is_kdump_kernel()) {
- /*
- * Event buffer is re-used for kdump kernel and setting
- * of MMIO register is not required.
- */
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
- }
+ if (!is_kdump_kernel()) {
+ /*
+ * Event buffer is re-used for kdump kernel and setting
+ * of MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf);
+ entry |= (amd_iommu_evtlog_size == EVTLOG_SIZE_DEF) ?
+ EVTLOG_LEN_MASK_DEF : EVTLOG_LEN_MASK_MAX;
+
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ }
}
/*
@@ -984,15 +998,20 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
return 0;
}
-static int __init remap_event_buffer(struct amd_iommu *iommu)
+static int __init remap_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 paddr;
pr_info_once("Re-using event buffer from the previous kernel\n");
- paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
- iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+ for_each_iommu(iommu) {
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
- return iommu->evt_buf ? 0 : -ENOMEM;
+ return 0;
}
static int __init remap_command_buffer(struct amd_iommu *iommu)
@@ -1044,10 +1063,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
ret = remap_command_buffer(iommu);
if (ret)
return ret;
-
- ret = remap_event_buffer(iommu);
- if (ret)
- return ret;
} else {
ret = alloc_cwwb_sem(iommu);
if (ret)
@@ -1056,10 +1071,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
ret = alloc_command_buffer(iommu);
if (ret)
return ret;
-
- ret = alloc_event_buffer(iommu);
- if (ret)
- return ret;
}
return 0;
@@ -2893,7 +2904,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
@@ -2957,7 +2967,6 @@ static void early_enable_iommus(void)
iommu_disable_event_buffer(iommu);
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -3070,6 +3079,7 @@ static void amd_iommu_resume(void *data)
for_each_iommu(iommu)
early_enable_iommu(iommu);
+ iommu_enable_event_buffer();
amd_iommu_enable_interrupts();
}
@@ -3399,6 +3409,23 @@ disable_snp:
#endif
}
+static void amd_iommu_apply_erratum_snp(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+ if (!amd_iommu_snp_en)
+ return;
+
+ /* Errata fix for Family 0x19 */
+ if (boot_cpu_data.x86 != 0x19)
+ return;
+
+ /* Set event log buffer size to max */
+ amd_iommu_evtlog_size = EVTLOG_SIZE_MAX;
+ pr_info("Applying erratum: Increase Event log size to 0x%x\n",
+ amd_iommu_evtlog_size);
+#endif
+}
+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -3435,6 +3462,21 @@ static int __init state_next(void)
case IOMMU_ENABLED:
register_syscore(&amd_iommu_syscore);
iommu_snp_enable();
+
+ amd_iommu_apply_erratum_snp();
+
+ /* Allocate/enable event log buffer */
+ if (is_kdump_kernel())
+ ret = remap_event_buffer();
+ else
+ ret = alloc_event_buffer();
+
+ if (ret) {
+ init_state = IOMMU_INIT_ERROR;
+ break;
+ }
+ iommu_enable_event_buffer();
+
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
@@ -4037,7 +4079,7 @@ int amd_iommu_snp_disable(void)
return 0;
for_each_iommu(iommu) {
- ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ ret = iommu_make_shared(iommu->evt_buf, amd_iommu_evtlog_size);
if (ret)
return ret;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 01171361f9bc..157505d96fdd 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1010,7 +1010,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
iommu_print_event(iommu, iommu->evt_buf + head);
/* Update head pointer of hardware ring-buffer */
- head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+ head = (head + EVTLOG_ENTRY_SIZE) % amd_iommu_evtlog_size;
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}