Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 283
1 file changed, 180 insertions(+), 103 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6b9bb58a414f..96425e92f313 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
+#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -223,7 +224,33 @@ static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
return ptes_per_table - (i & (ptes_per_table - 1));
}
-static bool selftest_running = false;
+/*
+ * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
+ * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
+ * 2) R_SRKBC: After decoding the table of PA sizes and valid initial lookup levels:
+ * a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
+ * b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
+ * c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
+ * d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
+ */
+static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+{
+ unsigned int ias = cfg->ias;
+ unsigned int oas = cfg->oas;
+
+ /* Covers 1 and 2.d */
+ if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
+ return (oas == 48) || (ias == 48);
+
+ /* Covers 2.a and 2.c */
+ if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
+ return (oas == 40) || (oas == 42);
+
+ /* Case 2.b */
+ return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
+ (data->start_level == 1) && (oas == 40);
+}
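
To make the cases above concrete: the number of concatenated tables is 2^(ias - capacity), where capacity is the IA width a walk starting at the chosen level can resolve. A minimal sketch of that arithmetic (concat_tables() is a hypothetical helper, not part of this patch):

/*
 * Illustrative only. With LPAE levels numbered 0..3, a walk starting at
 * start_level resolves granule_bits + (4 - start_level) * bits_per_level
 * IA bits; each IA bit beyond that doubles the table count. E.g. case
 * 2.b: ias = 40, 16K granule (granule_bits = 14, bits_per_level = 11),
 * start_level = 2 -> capacity 36, so 2^(40 - 36) = 16 tables.
 */
static unsigned int concat_tables(unsigned int ias, unsigned int granule_bits,
				  unsigned int bits_per_level,
				  unsigned int start_level)
{
	unsigned int capacity = granule_bits +
				(4 - start_level) * bits_per_level;

	return ias > capacity ? 1U << (ias - capacity) : 1;
}
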
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
@@ -235,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
- int order = get_order(size);
+ size_t alloc_size;
dma_addr_t dma;
void *pages;
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
+ /*
+ * For very small starting-level translation tables the HW requires a
+ * minimum alignment of at least 64 bytes to cover all cases.
+ */
+ alloc_size = max(size, 64);
if (cfg->alloc)
- pages = cfg->alloc(cookie, size, gfp);
+ pages = cfg->alloc(cookie, alloc_size, gfp);
else
- pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+ pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+ alloc_size);
if (!pages)
return NULL;
@@ -272,7 +303,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, order);
+ iommu_free_pages(pages);
return NULL;
}
@@ -288,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, get_order(size));
+ iommu_free_pages(pages);
}
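
The 64-byte floor is easiest to see with a worked example: a 40-bit IAS stage-1 walk with a 4K granule starts at level 0 with only two entries. A sketch mirroring the rounding above (lpae_table_alloc_size() is a hypothetical name):

/* Illustrative only -- mirrors the rounding in __arm_lpae_alloc_pages(). */
static size_t lpae_table_alloc_size(size_t size)
{
	/*
	 * A two-entry starting-level table is 2 * sizeof(u64) = 16 bytes,
	 * below the architectural 64-byte minimum table alignment, so the
	 * allocation is padded up to 64 bytes.
	 */
	return max(size, 64);
}
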
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -343,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -445,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
}
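
Callers that expect to hit still-mapped entries (the selftests below are one such case) can opt out of these warnings with the new quirk. A minimal sketch of the setup, where my_tlb_ops, dev and cookie are placeholders:

/* Sketch only: a config whose map-over-live-PTE failures won't WARN. */
static struct io_pgtable_ops *alloc_quiet_pgtable(struct device *dev,
						  void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_NO_WARN,
		.pgsize_bitmap	= SZ_4K,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &my_tlb_ops,	/* hypothetical TLB ops */
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}
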
@@ -613,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+ return 0;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -624,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -676,85 +711,107 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
data->start_level, ptep);
}
+struct io_pgtable_walk_data {
+ struct io_pgtable *iop;
+ void *data;
+ int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size);
+ unsigned long flags;
+ u64 addr;
+ const u64 end;
+};
+
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl);
+
+struct iova_to_phys_data {
+ arm_lpae_iopte pte;
+ int lvl;
+};
+
+static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iova_to_phys_data *data = walk_data->data;
+ data->pte = *ptep;
+ data->lvl = lvl;
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = data->start_level;
-
- do {
- /* Valid IOPTE pointer? */
- if (!ptep)
- return 0;
-
- /* Grab the IOPTE we're interested in */
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
- pte = READ_ONCE(*ptep);
-
- /* Valid entry? */
- if (!pte)
- return 0;
+ struct iova_to_phys_data d;
+ struct io_pgtable_walk_data walk_data = {
+ .data = &d,
+ .visit = visit_iova_to_phys,
+ .addr = iova,
+ .end = iova + 1,
+ };
+ int ret;
- /* Leaf entry? */
- if (iopte_leaf(pte, lvl, data->iop.fmt))
- goto found_translation;
+ ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+ if (ret)
+ return 0;
- /* Take it to the next level */
- ptep = iopte_deref(pte, data);
- } while (++lvl < ARM_LPAE_MAX_LEVELS);
+ iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
+ return iopte_to_paddr(d.pte, data) | iova;
+}
- /* Ran out of page tables to walk */
+static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
+ data->ptes[lvl] = *ptep;
return 0;
-
-found_translation:
- iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
- return iopte_to_paddr(pte, data) | iova;
}
-struct io_pgtable_walk_data {
- struct iommu_dirty_bitmap *dirty;
- unsigned long flags;
- u64 addr;
- const u64 end;
-};
+static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
+ void *wd)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_walk_data walk_data = {
+ .data = wd,
+ .visit = visit_pgtable_walk,
+ .addr = iova,
+ .end = iova + 1,
+ };
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+}
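
A hedged usage sketch of the new op, dumping the PTEs that translate one IOVA. It assumes struct arm_lpae_io_pgtable_walk_data (from <linux/io-pgtable.h>) exposes the ptes[] array that visit_pgtable_walk() fills; levels the walk never reaches stay zero:

/* Sketch only: dump_walk() is a hypothetical debug helper. */
static void dump_walk(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct arm_lpae_io_pgtable_walk_data wd = {};
	int lvl;

	if (ops->pgtable_walk(ops, iova, &wd))
		return;

	for (lvl = 0; lvl < ARM_LPAE_MAX_LEVELS; lvl++)
		pr_info("lvl %d: pte %016llx\n", lvl, (u64)wd.ptes[lvl]);
}
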
-static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep, int lvl)
+static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
- if (iopte_leaf(pte, lvl, iop->fmt)) {
- size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ int ret = walk_data->visit(walk_data, lvl, ptep, size);
+ if (ret)
+ return ret;
- if (iopte_writeable_dirty(pte)) {
- iommu_dirty_bitmap_record(walk_data->dirty,
- walk_data->addr, size);
- if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
- iopte_set_writeable_clean(ptep);
- }
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
walk_data->addr += size;
return 0;
}
- if (WARN_ON(!iopte_table(pte, lvl)))
+ if (!iopte_table(pte, lvl))
return -EINVAL;
ptep = iopte_deref(pte, data);
- return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+ return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl)
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl)
{
u32 idx;
int max_entries, ret;
@@ -769,7 +826,7 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
- ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
@@ -777,6 +834,23 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
return 0;
}
+static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iommu_dirty_bitmap *dirty = walk_data->data;
+
+ if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
+ return 0;
+
+ if (iopte_writeable_dirty(*ptep)) {
+ iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
+ if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ iopte_set_writeable_clean(ptep);
+ }
+
+ return 0;
+}
+
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
@@ -785,7 +859,9 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
- .dirty = dirty,
+ .iop = &data->iop,
+ .data = dirty,
+ .visit = visit_dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
@@ -800,7 +876,7 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
- return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
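
From the caller's side the dirty walk is driven roughly as in this sketch (bitmap and gather would come from the iommufd dirty-tracking path; record_dirty() is a hypothetical wrapper):

/* Sketch only: record dirty leaves in [iova, iova + size) without clearing. */
static int record_dirty(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iova_bitmap *bitmap,
			struct iommu_iotlb_gather *gather)
{
	struct iommu_dirty_bitmap dirty;

	iommu_dirty_bitmap_init(&dirty, bitmap, gather);
	return ops->read_and_clear_dirty(ops, iova, size,
					 IOMMU_DIRTY_NO_CLEAR, &dirty);
}
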
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -882,6 +958,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
+ .pgtable_walk = arm_lpae_pgtable_walk,
};
return data;
@@ -898,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -999,25 +1077,20 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
- /*
- * Concatenate PGDs at level 1 if possible in order to reduce
- * the depth of the stage-2 walk.
- */
- if (data->start_level == 0) {
- unsigned long pgd_pages;
-
- pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
- if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_bits += data->bits_per_level;
- data->start_level++;
- }
+ if (arm_lpae_concat_mandatory(cfg, data)) {
+ if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
+ ARM_LPAE_S2_MAX_CONCAT_PAGES))
+ goto out_free_data;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
/* VTCR */
@@ -1246,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1262,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1352,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1364,36 +1433,44 @@ static int __init arm_lpae_do_selftests(void)
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] __initconst = {
+ static const unsigned int address_size[] __initconst = {
32, 36, 40, 42, 44, 48,
};
- int i, j, pass = 0, fail = 0;
- struct device dev;
+ int i, j, k, pass = 0, fail = 0;
+ struct faux_device *dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
- .oas = 48,
.coherent_walk = true,
- .iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
};
- /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
- set_dev_node(&dev, NUMA_NO_NODE);
+ dev = faux_device_create("io-pgtable-test", NULL, NULL);
+ if (!dev)
+ return -ENOMEM;
+
+ cfg.iommu_dev = &dev->dev;
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
- for (j = 0; j < ARRAY_SIZE(ias); ++j) {
- cfg.pgsize_bitmap = pgsize[i];
- cfg.ias = ias[j];
- pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
- pgsize[i], ias[j]);
- if (arm_lpae_run_tests(&cfg))
- fail++;
- else
- pass++;
+ for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+ /* Don't use ias > oas as it is not valid for stage-2. */
+ for (k = 0; k <= j; ++k) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = address_size[k];
+ cfg.oas = address_size[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+ pgsize[i], cfg.ias, cfg.oas);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
}
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ faux_device_destroy(dev);
+
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);