author		Linus Torvalds <torvalds@linux-foundation.org>	2022-01-10 19:49:37 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-10 19:49:37 +0300
commit		9b9e211360044c12d7738973c944d6f300134881 (patch)
tree		f994a379135e70397df3c653e6578ad7e5d295fe /arch/arm64/mm/context.c
parent		a7ac314061375c7805e0d3a26aad6eb0c41100df (diff)
parent		945409a6ef442cfe5f2f14e5626d4306d53100f0 (diff)
download	linux-9b9e211360044c12d7738973c944d6f300134881.tar.xz
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - KCSAN enabled for arm64.

 - Additional kselftests to exercise the syscall ABI w.r.t. SVE/FPSIMD.

 - Some more SVE clean-ups and refactoring in preparation for SME
   support (scalable matrix extensions).

 - BTI clean-ups (SYM_FUNC macros etc.)

 - arm64 atomics clean-up and codegen improvements.

 - HWCAPs for FEAT_AFP (alternate floating point behaviour) and
   FEAT_RPRES (increased precision of reciprocal estimate and
   reciprocal square root estimate).

 - Use SHA3 instructions to speed up XOR.

 - arm64 unwind code refactoring/unification.

 - Avoid DC (data cache maintenance) instructions when DCZID_EL0.DZP ==
   1 (potentially set by a hypervisor; user-space already does this).

 - Perf updates for arm64: support for CI-700, HiSilicon PCIe PMU,
   Marvell CN10K LLC-TAD PMU, miscellaneous clean-ups.

 - Other fixes and clean-ups; highlights: fix the handling of erratum
   1418040, correct the calculation of the nomap region boundaries,
   introduce io_stop_wc() mapped to the new DGH instruction (data
   gathering hint), as sketched below.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (81 commits)
  arm64: Use correct method to calculate nomap region boundaries
  arm64: Drop outdated links in comments
  arm64: perf: Don't register user access sysctl handler multiple times
  drivers: perf: marvell_cn10k: fix an IS_ERR() vs NULL check
  perf/smmuv3: Fix unused variable warning when CONFIG_OF=n
  arm64: errata: Fix exec handling in erratum 1418040 workaround
  arm64: Unhash early pointer print plus improve comment
  asm-generic: introduce io_stop_wc() and add implementation for ARM64
  arm64: Ensure that the 'bti' macro is defined where linkage.h is included
  arm64: remove __dma_*_area() aliases
  docs/arm64: delete a space from tagged-address-abi
  arm64: Enable KCSAN
  kselftest/arm64: Add pidbench for floating point syscall cases
  arm64/fp: Add comments documenting the usage of state restore functions
  kselftest/arm64: Add a test program to exercise the syscall ABI
  kselftest/arm64: Allow signal tests to trigger from a function
  kselftest/arm64: Parameterise ptrace vector length information
  arm64/sve: Minor clarification of ABI documentation
  arm64/sve: Generalise vector length configuration prctl() for SME
  arm64/sve: Make sysctl interface for SVE reusable by SME
  ...
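A minimal usage sketch for the new io_stop_wc() barrier, not taken from the patch: the struct, field names and descriptor format below are hypothetical. The barrier keeps writes to a write-combining mapping from being gathered past the doorbell write; on arm64 it emits the new DGH hint, elsewhere it is a no-op.

#include <linux/types.h>
#include <linux/io.h>
#include <asm/barrier.h>

struct my_dev {
	void __iomem *wc_buf;		/* write-combining BAR mapping */
	void __iomem *doorbell;		/* uncached doorbell register */
};

static void post_descriptors(struct my_dev *dev, const u64 *desc, int n)
{
	int i;

	/* Descriptor writes may be merged in the CPU's WC buffer. */
	for (i = 0; i < n; i++)
		writeq(desc[i], dev->wc_buf + i * sizeof(u64));

	io_stop_wc();			/* stop gathering: flush WC writes first */

	writel(n, dev->doorbell);	/* then ring the doorbell */
}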
Diffstat (limited to 'arch/arm64/mm/context.c')
 arch/arm64/mm/context.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cd72576ae2b7..b8b4cf0bcf39 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map;
#define ASID_FIRST_VERSION (1UL << asid_bits)
#define NUM_USER_ASIDS ASID_FIRST_VERSION
-#define asid2idx(asid) ((asid) & ~ASID_MASK)
-#define idx2asid(idx) asid2idx(idx)
+#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid) ((asid) | (genid))
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void)
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
fallthrough;
- case 0:
+ case ID_AA64MMFR0_ASID_8:
asid = 8;
break;
- case 2:
+ case ID_AA64MMFR0_ASID_16:
asid = 16;
}
@@ -120,7 +120,7 @@ static void flush_context(void)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid2idx(asid), asid_map);
+ __set_bit(ctxid2asid(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
- u64 newasid = generation | (asid & ~ASID_MASK);
+ u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
/*
* If our current ASID was active during a rollover, we
@@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm)
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- if (!__test_and_set_bit(asid2idx(asid), asid_map))
+ if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
return newasid;
}
@@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return idx2asid(asid) | generation;
+ return asid2ctxid(asid, generation);
}
void check_and_switch_context(struct mm_struct *mm)
@@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
}
nr_pinned_asids++;
- __set_bit(asid2idx(asid), pinned_asid_map);
+ __set_bit(ctxid2asid(asid), pinned_asid_map);
refcount_set(&mm->context.pinned, 1);
out_unlock:
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
- asid &= ~ASID_MASK;
+ asid = ctxid2asid(asid);
/* Set the equivalent of USER_ASID_BIT */
if (asid && arm64_kernel_unmapped_at_el0())
@@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm)
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
if (refcount_dec_and_test(&mm->context.pinned)) {
- __clear_bit(asid2idx(asid), pinned_asid_map);
+ __clear_bit(ctxid2asid(asid), pinned_asid_map);
nr_pinned_asids--;
}
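For reference, a minimal standalone sketch (not kernel code) of the context-ID layout behind the ctxid2asid()/asid2ctxid() rename above: the low bits of a context ID hold the hardware ASID, the high bits hold the rollover generation. The fixed 16-bit ASID width and the main() driver are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	16			/* assume 16-bit ASIDs */
#define ASID_MASK	(~0ULL << ASID_BITS)	/* generation bits */

/* Mirrors ctxid2asid(): strip the generation, keep the ASID. */
static uint64_t ctxid2asid(uint64_t ctxid)
{
	return ctxid & ~ASID_MASK;
}

/* Mirrors asid2ctxid(): combine an ASID with a generation. */
static uint64_t asid2ctxid(uint64_t asid, uint64_t genid)
{
	return asid | genid;
}

int main(void)
{
	uint64_t gen = 3ULL << ASID_BITS;	/* third rollover generation */
	uint64_t ctxid = asid2ctxid(0x42, gen);

	printf("ctxid = %#llx\n", (unsigned long long)ctxid);		  /* 0x30042 */
	printf("asid  = %#llx\n", (unsigned long long)ctxid2asid(ctxid)); /* 0x42 */
	return 0;
}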