Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
-rw-r--r--   drivers/iommu/amd_iommu_init.c   101
1 file changed, 61 insertions(+), 40 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6130278c5d71..5a11328f4d98 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -167,7 +167,9 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];
-int amd_iommus_present;
+
+/* Number of IOMMUs present in the system */
+static int amd_iommus_present;
/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
@@ -254,10 +256,6 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
- u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write);
-
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@@ -272,6 +270,11 @@ static inline unsigned long tbl_size(int entry_size)
return 1UL << shift;
}
+int amd_iommu_get_num_iommus(void)
+{
+ return amd_iommus_present;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1336,7 +1339,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
- iommu->index             = amd_iommus_present++;
+ iommu->index = amd_iommus_present++;
if (unlikely(iommu->index >= MAX_IOMMUS)) {
WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
@@ -1477,6 +1480,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write);
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
@@ -1488,8 +1493,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
- if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
- (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
+ (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@@ -2711,6 +2716,18 @@ bool amd_iommu_v2_supported(void)
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
+struct amd_iommu *get_amd_iommu(unsigned int idx)
+{
+ unsigned int i = 0;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ if (i++ == idx)
+ return iommu;
+ return NULL;
+}
+EXPORT_SYMBOL(get_amd_iommu);
+
/****************************************************************************
*
* IOMMU EFR Performance Counter support functionality. This code allows
@@ -2718,17 +2735,14 @@ EXPORT_SYMBOL(amd_iommu_v2_supported);
*
****************************************************************************/
-u8 amd_iommu_pc_get_max_banks(u16 devid)
+u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
- struct amd_iommu *iommu;
- u8 ret = 0;
+ struct amd_iommu *iommu = get_amd_iommu(idx);
- /* locate the iommu governing the devid */
- iommu = amd_iommu_rlookup_table[devid];
if (iommu)
- ret = iommu->max_banks;
+ return iommu->max_banks;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
@@ -2738,62 +2752,69 @@ bool amd_iommu_pc_supported(void)
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
-u8 amd_iommu_pc_get_max_counters(u16 devid)
+u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
- struct amd_iommu *iommu;
- u8 ret = 0;
+ struct amd_iommu *iommu = get_amd_iommu(idx);
- /* locate the iommu governing the devid */
- iommu = amd_iommu_rlookup_table[devid];
if (iommu)
- ret = iommu->max_counters;
+ return iommu->max_counters;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
- u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write)
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write)
{
u32 offset;
u32 max_offset_lim;
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present)
+ return -ENODEV;
+
/* Check for valid iommu and pc register indexing */
- if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+ if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
return -ENODEV;
- offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+ offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
/* Limit the offset to the hw defined mmio region aperture */
- max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
+ max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
(iommu->max_counters << 8) | 0x28);
if ((offset < MMIO_CNTR_REG_OFFSET) ||
(offset > max_offset_lim))
return -EINVAL;
if (is_write) {
- writel((u32)*value, iommu->mmio_base + offset);
- writel((*value >> 32), iommu->mmio_base + offset + 4);
+ u64 val = *value & GENMASK_ULL(47, 0);
+
+ writel((u32)val, iommu->mmio_base + offset);
+ writel((val >> 32), iommu->mmio_base + offset + 4);
} else {
*value = readl(iommu->mmio_base + offset + 4);
*value <<= 32;
- *value = readl(iommu->mmio_base + offset);
+ *value |= readl(iommu->mmio_base + offset);
+ *value &= GENMASK_ULL(47, 0);
}
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write)
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ return -EINVAL;
- /* Make sure the IOMMU PC resource is available */
- if (!amd_iommu_pc_present || iommu == NULL)
- return -ENODEV;
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_reg);
+
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+ if (!iommu)
+ return -EINVAL;
- return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
- value, is_write);
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
+EXPORT_SYMBOL(amd_iommu_pc_set_reg);
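
For context, a minimal sketch of how a consumer (for example a perf-style PMU driver) might use the reworked, index-based interface. The function names and return conventions are taken from the hunks above; everything else (the enclosing function, the choice of bank/counter/fxn values, and which headers provide the declarations) is assumed purely for illustration and is not part of this patch.

    /*
     * Hedged usage sketch, not part of the patch. Assumes the
     * declarations for the API below are visible to the caller
     * (e.g. via the driver's own headers).
     */
    #include <linux/kernel.h>

    static void sketch_dump_iommu_pc(void)
    {
    	unsigned int idx;
    	u64 val;

    	if (!amd_iommu_pc_supported())
    		return;

    	for (idx = 0; idx < amd_iommu_get_num_iommus(); idx++) {
    		struct amd_iommu *iommu = get_amd_iommu(idx);

    		if (!iommu)
    			continue;

    		pr_info("IOMMU %u: %u banks, %u counters\n", idx,
    			amd_iommu_pc_get_max_banks(idx),
    			amd_iommu_pc_get_max_counters(idx));

    		/*
    		 * Read bank 0 / counter 0 / fxn 0 (the counter register
    		 * itself); amd_iommu_pc_get_reg() returns 0 on success.
    		 */
    		if (!amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
    			pr_info("  counter[0][0] = 0x%llx\n", val);
    	}
    }

Note how the callers now address an IOMMU by index (or by the struct amd_iommu pointer obtained from get_amd_iommu()) rather than by a device ID looked up through amd_iommu_rlookup_table[], which is the point of the interface change above.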