From 48118151d8cc7a457f61d91df0c053473d4b31b1 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 18 Sep 2020 12:18:44 +0200
Subject: arm64: mm: Pin down ASIDs for sharing mm with devices

To enable address space sharing with the IOMMU, introduce
arm64_mm_context_get() and arm64_mm_context_put(), which pin down a
context and ensure that it will keep its ASID after a rollover. Export
the symbols to let the modular SMMUv3 driver use them.

Pinning is necessary because a device constantly needs a valid ASID,
unlike tasks that only require one when running. Without pinning, we
would need to notify the IOMMU when we're about to use a new ASID for a
task, and it would get complicated when a new task is assigned a shared
ASID. Consider the following scenario with no ASID pinned:

1. Task t1 is running on CPUx with shared ASID (gen=1, asid=1)
2. Task t2 is scheduled on CPUx, gets ASID (1, 2)
3. Task tn is scheduled on CPUy, a rollover occurs, tn gets ASID (2, 1)

We would now have to immediately generate a new ASID for t1, notify the
IOMMU, and finally enable task tn. We are holding the lock during all
that time, since we can't afford having another CPU trigger a rollover.
The IOMMU issues invalidation commands that can take tens of
milliseconds.

It gets needlessly complicated. All we wanted to do was schedule task
tn, which has no business with the IOMMU. By letting the IOMMU pin
tasks when needed, we avoid stalling the slow path, and let the pinning
fail when we're out of shareable ASIDs.

After a rollover, the allocator expects at least one ASID to be
available in addition to the reserved ones (one per CPU). So
(NR_ASIDS - NR_CPUS - 1) is the maximum number of ASIDs that can be
shared with the IOMMU.

Signed-off-by: Jean-Philippe Brucker
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/20200918101852.582559-5-jean-philippe@linaro.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/mmu.h         |   3 +
 arch/arm64/include/asm/mmu_context.h |  11 +++-
 arch/arm64/mm/context.c              | 105 +++++++++++++++++++++++++++++++++--
 3 files changed, 112 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index a7a5ecaa2e83..0fda85b2cc1b 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,11 +17,14 @@

 #ifndef __ASSEMBLY__

+#include <linux/refcount.h>
+
 typedef struct {
 	atomic64_t	id;
 #ifdef CONFIG_COMPAT
 	void		*sigpage;
 #endif
+	refcount_t	pinned;
 	void		*vdso;
 	unsigned long	flags;
 } mm_context_t;

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f2d7537d6f83..0672236e1aea 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -177,7 +177,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm);

-#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	atomic64_set(&mm->context.id, 0);
+	refcount_set(&mm->context.pinned, 0);
+	return 0;
+}

 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -248,6 +254,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);

+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
 #endif /* !__ASSEMBLY__ */

 #endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 9b11c096a042..001737a8f309 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -27,6 +27,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;

+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)

@@ -72,7 +76,7 @@ void verify_cpu_asid_bits(void)
 	}
 }

-static void set_kpti_asid_bits(void)
+static void set_kpti_asid_bits(unsigned long *map)
 {
 	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
 	/*
@@ -81,13 +85,15 @@ static void set_kpti_asid_bits(void)
 	 * is set, then the ASID will map only userspace. Thus
 	 * mark even as reserved for kernel.
 	 */
-	memset(asid_map, 0xaa, len);
+	memset(map, 0xaa, len);
 }

 static void set_reserved_asid_bits(void)
 {
-	if (arm64_kernel_unmapped_at_el0())
-		set_kpti_asid_bits();
+	if (pinned_asid_map)
+		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+	else if (arm64_kernel_unmapped_at_el0())
+		set_kpti_asid_bits(asid_map);
 	else
 		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 }
@@ -165,6 +171,14 @@ static u64 new_context(struct mm_struct *mm)
 		if (check_update_reserved_asid(asid, newasid))
 			return newasid;

+		/*
+		 * If it is pinned, we can keep using it. Note that reserved
+		 * takes priority, because even if it is also pinned, we need to
+		 * update the generation into the reserved_asids.
+		 */
+		if (refcount_read(&mm->context.pinned))
+			return newasid;
+
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
@@ -256,6 +270,71 @@ switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }

+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+	unsigned long flags;
+	u64 asid;
+
+	if (!pinned_asid_map)
+		return 0;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+	asid = atomic64_read(&mm->context.id);
+
+	if (refcount_inc_not_zero(&mm->context.pinned))
+		goto out_unlock;
+
+	if (nr_pinned_asids >= max_pinned_asids) {
+		asid = 0;
+		goto out_unlock;
+	}
+
+	if (!asid_gen_match(asid)) {
+		/*
+		 * We went through one or more rollover since that ASID was
+		 * used. Ensure that it is still valid, or generate a new one.
+		 */
+		asid = new_context(mm);
+		atomic64_set(&mm->context.id, asid);
+	}
+
+	nr_pinned_asids++;
+	__set_bit(asid2idx(asid), pinned_asid_map);
+	refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+	asid &= ~ASID_MASK;
+
+	/* Set the equivalent of USER_ASID_BIT */
+	if (asid && arm64_kernel_unmapped_at_el0())
+		asid |= 1;
+
+	return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+	unsigned long flags;
+	u64 asid = atomic64_read(&mm->context.id);
+
+	if (!pinned_asid_map)
+		return;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+	if (refcount_dec_and_test(&mm->context.pinned)) {
+		__clear_bit(asid2idx(asid), pinned_asid_map);
+		nr_pinned_asids--;
+	}
+
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
+
 /* Errata workaround post TTBRx_EL1 update. */
 asmlinkage void post_ttbr_update_workaround(void)
 {
@@ -296,8 +375,11 @@ static int asids_update_limit(void)
 {
 	unsigned long num_available_asids = NUM_USER_ASIDS;

-	if (arm64_kernel_unmapped_at_el0())
+	if (arm64_kernel_unmapped_at_el0()) {
 		num_available_asids /= 2;
+		if (pinned_asid_map)
+			set_kpti_asid_bits(pinned_asid_map);
+	}
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
 	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
@@ -305,6 +387,13 @@ static int asids_update_limit(void)
 	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
 	pr_info("ASID allocator initialised with %lu entries\n",
 		num_available_asids);
+
+	/*
+	 * There must always be an ASID available after rollover. Ensure that,
+	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
+	 * are pinned, there still is at least one empty slot in the ASID map.
+	 */
+	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
 	return 0;
 }
 arch_initcall(asids_update_limit);
@@ -319,13 +408,17 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);

+	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
+				  sizeof(*pinned_asid_map), GFP_KERNEL);
+	nr_pinned_asids = 0;
+
 	/*
 	 * We cannot call set_reserved_asid_bits() here because CPU
 	 * caps are not finalized yet, so it is safer to assume KPTI
 	 * and reserve kernel ASID's from beginning.
	 */
 	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
-		set_kpti_asid_bits();
+		set_kpti_asid_bits(asid_map);
 	return 0;
 }
 early_initcall(asids_init);
--
cgit v1.2.3
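[Editor's illustration] A minimal sketch of how a driver sharing an mm with a device might use the two symbols exported by the patch above. This is not the SMMUv3 driver's actual code: the structure and function names (my_sva_bond, my_sva_bind, my_sva_unbind) are invented for the example and the device-side programming is elided; only arm64_mm_context_get() and arm64_mm_context_put() come from the patch.

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

struct my_sva_bond {
	struct mm_struct *mm;
	unsigned long asid;
};

static int my_sva_bind(struct my_sva_bond *bond, struct mm_struct *mm)
{
	/*
	 * Pin the mm's ASID so it survives rollover for as long as the
	 * device holds a reference. A return value of 0 means no more
	 * ASIDs can be shared with devices.
	 */
	bond->asid = arm64_mm_context_get(mm);
	if (!bond->asid)
		return -ENOSPC;

	bond->mm = mm;
	/* ... program bond->asid into the device's context descriptor ... */
	return 0;
}

static void my_sva_unbind(struct my_sva_bond *bond)
{
	/* ... quiesce the device and invalidate its TLB entries first ... */
	arm64_mm_context_put(bond->mm);
}

Because the pin is a refcount on the mm context, repeated binds of the same mm only take the slow path once; the ASID slot is released when the last bond drops its reference.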
From 6f3c4afae9805bda1682e0ad17b8ce56d85c9a83 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 18 Sep 2020 12:18:46 +0200
Subject: arm64: cpufeature: Export symbol read_sanitised_ftr_reg()

The SMMUv3 driver would like to read the MMFR0 PARANGE field in order
to share CPU page tables with devices. Allow the driver to be built as
a module by exporting the read_sanitised_ftr_reg() cpufeature symbol.

Signed-off-by: Jean-Philippe Brucker
Reviewed-by: Jonathan Cameron
Acked-by: Suzuki K Poulose
Link: https://lore.kernel.org/r/20200918101852.582559-7-jean-philippe@linaro.org
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpufeature.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6424584be01e..07f10ad8855c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1111,6 +1111,7 @@ u64 read_sanitised_ftr_reg(u32 id)
 		return 0;
 	return regp->sys_val;
 }
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);

 #define read_sysreg_case(r)	\
 	case r: return read_sysreg_s(r)
--
cgit v1.2.3
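[Editor's illustration] A hedged sketch of what the export enables: a modular driver can read the sanitised ID_AA64MMFR0_EL1 value and extract the PARange field to size its output address range. The helper below is hypothetical; SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_PARANGE_SHIFT and cpuid_feature_extract_unsigned_field() are the arm64 names expected in a tree of this vintage and should be checked against the target kernel.

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Hypothetical module code; only read_sanitised_ftr_reg() is from the patch. */
static unsigned int my_read_parange(void)
{
	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);

	/* 4-bit PARange field: e.g. 0b0000 = 32-bit PA, 0b0101 = 48-bit PA */
	return cpuid_feature_extract_unsigned_field(mmfr0,
						    ID_AA64MMFR0_PARANGE_SHIFT);
}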
From a76a37777f2c936b1f046bfc0c5982c958b16bfe Mon Sep 17 00:00:00 2001
From: Zhou Wang
Date: Mon, 28 Sep 2020 16:32:02 +0800
Subject: iommu/arm-smmu-v3: Ensure queue is read after updating prod pointer

Reading the 'prod' MMIO register in order to determine whether or not
there is valid data beyond 'cons' for a given queue does not provide
sufficient dependency ordering, as the resulting access is address
dependent only on 'cons' and can therefore be speculated ahead of time,
potentially allowing stale data to be read by the CPU.

Use readl() instead of readl_relaxed() when updating the shadow copy of
the 'prod' pointer, so that all speculated memory reads from the
corresponding queue can occur only from valid slots.

Signed-off-by: Zhou Wang
Link: https://lore.kernel.org/r/1601281922-117296-1-git-send-email-wangzhou1@hisilicon.com
[will: Use readl() instead of explicit barrier. Update 'cons' side to match.]
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/barrier.h            |  1 +
 arch/arm64/include/asm/io.h                 |  1 +
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 11 +++++++++--
 3 files changed, 11 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index fb4c27506ef4..c3009b0e5239 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -45,6 +45,7 @@
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)

+#define dma_mb()	dmb(osh)
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index ff50dd731852..fd172c41df90 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -110,6 +110,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_par(v)		__iormb(v)
 #define __iowmb()		dma_wmb()
+#define __iomb()		dma_mb()

 /*
  * Relaxed I/O memory access primitives. These follow the Device memory
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 3063aeb38063..1ffec08d345a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -810,7 +810,7 @@ static void queue_sync_cons_out(struct arm_smmu_queue *q)
 	 * Ensure that all CPU accesses (reads and writes) to the queue
 	 * are complete before we update the cons pointer.
 	 */
-	mb();
+	__iomb();
 	writel_relaxed(q->llq.cons, q->cons_reg);
 }

@@ -822,8 +822,15 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)

 static int queue_sync_prod_in(struct arm_smmu_queue *q)
 {
+	u32 prod;
 	int ret = 0;
-	u32 prod = readl_relaxed(q->prod_reg);
+
+	/*
+	 * We can't use the _relaxed() variant here, as we must prevent
+	 * speculative reads of the queue before we have determined that
+	 * prod has indeed moved.
+	 */
+	prod = readl(q->prod_reg);

 	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
 		ret = -EOVERFLOW;
--
cgit v1.2.3
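[Editor's illustration] To show the ordering requirement outside the SMMUv3 code, here is a self-contained sketch of a consumer for a device-filled ring. Everything here (struct my_queue, my_queue_drain) is invented for the example; the only point carried over from the patch is the choice of readl() over readl_relaxed() when sampling the producer index.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical ring written by a device, drained by the CPU. */
struct my_queue {
	void __iomem	*prod_reg;	/* device updates the producer index here */
	u32		cons;		/* CPU-owned consumer index */
	u32		depth;		/* number of slots, power of two */
	u64		*ring;		/* entries written by the device */
};

static void my_queue_drain(struct my_queue *q, void (*handle)(u64 entry))
{
	/*
	 * readl(), unlike readl_relaxed(), orders this MMIO read before
	 * the subsequent loads from q->ring, so the CPU cannot
	 * speculatively read slots before it knows that 'prod' has
	 * actually moved and end up consuming stale entries.
	 */
	u32 prod = readl(q->prod_reg);

	while (q->cons != prod) {
		handle(q->ring[q->cons & (q->depth - 1)]);
		q->cons++;
	}
}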