From e8e5104118473ffa23c013da42ae6a9df2867a07 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 19 Oct 2022 13:03:45 +0100
Subject: arm64/asm: Remove unused enable_da macro

We no longer use the enable_da macro; remove it to avoid having to think
about maintaining it.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221019120346.72289-1-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e5957a53be39..27e0c75f7818 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -51,11 +51,6 @@
 	msr	daif, \flags
 	.endm
 
-	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
-	.macro enable_da
-	msr	daifclr, #(8 | 4)
-	.endm
-
 /*
  * Save/restore interrupts.
  */
-- cgit v1.2.3

From 38e4b6605e5cde68ba388f3175e7b4962694674c Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 8 Nov 2022 09:14:06 +0530
Subject: arm64/mm: Drop ARM64_KERNEL_USES_PMD_MAPS

Currently ARM64_KERNEL_USES_PMD_MAPS is an unnecessary abstraction. Kernel
mapping at the PMD (aka huge page aka block) level is only applicable with
a 4K base page, which makes it 2MB aligned, a necessary requirement for the
linear mapping and the physical memory start address. This can easily be
achieved by checking against the base page size directly. This drops the
now-redundant macro ARM64_KERNEL_USES_PMD_MAPS.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20221108034406.2950071-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/kernel-pgtable.h | 11 +++--------
 arch/arm64/mm/mmu.c                     |  2 +-
 2 files changed, 4 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 32d14f481f0c..fcd14197756f 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,11 +18,6 @@
  * with 4K (section size = 2M) but not with 16K (section size = 32M) or
  * 64K (section size = 512M).
  */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_KERNEL_USES_PMD_MAPS	1
-#else
-#define ARM64_KERNEL_USES_PMD_MAPS	0
-#endif
 
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
@@ -34,7 +29,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
  * cases.
  */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #else
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -96,7 +91,7 @@
 #define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
 /* Initial memory map size */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
 #define SWAPPER_BLOCK_SIZE	PMD_SIZE
 #define SWAPPER_TABLE_SHIFT	PUD_SHIFT
@@ -112,7 +107,7 @@
 #define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
 #else
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9a7c38965154..d386033a074c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1196,7 +1196,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
-	if (!ARM64_KERNEL_USES_PMD_MAPS)
+	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
 		return vmemmap_populate_basepages(start, end, node, altmap);
 
 	do {
-- cgit v1.2.3

From 657eef0a5420a02c02945ed8c87f2ddcbd255772 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 14 Nov 2022 12:54:24 +0000
Subject: arm64: atomics: lse: remove stale dependency on JUMP_LABEL

Currently CONFIG_ARM64_USE_LSE_ATOMICS depends upon CONFIG_JUMP_LABEL, as
the inline atomics were indirected with a static branch.

However, since commit:

  21fb26bfb01ffe0d ("arm64: alternatives: add alternative_has_feature_*()")

... we use an alternative_branch (which is always available) rather than a
static branch, and hence the dependency is unnecessary.

Remove the stale dependency, along with the stale include. This will allow
the use of LSE atomics in kernels built with CONFIG_JUMP_LABEL=n, and
reduces the risk of circular header dependencies via .

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221114125424.2998268-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig           | 1 -
 arch/arm64/include/asm/lse.h | 1 -
 2 files changed, 2 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 505c8a1ccbe0..6a4704d4e36b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1714,7 +1714,6 @@ config ARM64_LSE_ATOMICS
 
 config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
-	depends on JUMP_LABEL
 	default y
 	help
 	  As part of the Large System Extensions, ARMv8.1 introduces new
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index c503db8e73b0..f99d74826a7e 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -10,7 +10,6 @@
 
 #include
 #include
-#include
 #include
 #include
 #include
-- cgit v1.2.3

From d8c1d798a2e5091128c391c6dadcc9be334af3f5 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 14 Nov 2022 14:40:42 +0000
Subject: arm64: make is_ttbrX_addr() noinstr-safe

We use is_ttbr0_addr() in noinstr code, but as it's only marked as inline,
it's theoretically possible for the compiler to place it out-of-line and
instrument it, which would be problematic.

Mark is_ttbr0_addr() as __always_inline such that it can safely be used
from noinstr code. For consistency, do the same to is_ttbr1_addr().

Note that while is_ttbr1_addr() calls arch_kasan_reset_tag(), this is a
macro (and its callees are either macros or __always_inline), so there is
no risk of transient instrumentation.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221114144042.3001140-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/processor.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 445aa3af3b76..400f8956328b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -308,13 +308,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 }
 #endif
 
-static inline bool is_ttbr0_addr(unsigned long addr)
+static __always_inline bool is_ttbr0_addr(unsigned long addr)
 {
 	/* entry assembly clears tags for TTBR0 addrs */
 	return addr < TASK_SIZE;
 }
 
-static inline bool is_ttbr1_addr(unsigned long addr)
+static __always_inline bool is_ttbr1_addr(unsigned long addr)
 {
 	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-- cgit v1.2.3

From d3d10f0d370c6c83e88cd79e450e70c31c6ae50c Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Wed, 16 Nov 2022 14:13:02 +0530
Subject: arm64/mm: Drop idmap_pg_end[] declaration

idmap_pg_end[] is not used anywhere, hence just drop its declaration.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrew Morton
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20221116084302.320685-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 71a1af42f0e8..3efedacc014f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -609,7 +609,6 @@
 extern pgd_t init_pg_dir[PTRS_PER_PGD];
 extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
-- cgit v1.2.3

From 5b468dad6e5cf4998bdc05efbc5526c111666027 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 17 Nov 2022 18:01:44 +0530
Subject: arm64/mm: Drop unused restore_ttbr1

The restore_ttbr1 procedure is not used anywhere, hence just drop it.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrew Morton
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20221117123144.403582-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 11 -----------
 1 file changed, 11 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 27e0c75f7818..88175551b401 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -614,17 +614,6 @@ alternative_endif
 #endif
 	.endm
 
-/*
- * Perform the reverse of offset_ttbr1.
- * bic is used as it can cover the immediate value and, in future, won't need
- * to be nop'ed out when dealing with 52-bit kernel VAs.
- */
-	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_VA_BITS_52
-	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
-	.endm
-
 /*
  * Arrange a physical address in a TTBR register, taking care of 52-bit
  * addresses.
-- cgit v1.2.3

From a8bf2fc43fc63c0bd38b45c9a9616d43b683585d Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 23 Nov 2022 18:02:08 +0000
Subject: arm64/kpti: Move DAIF masking to C code

We really don't want to take an exception while replacing TTBR1, so we mask
DAIF during the actual update. Currently this is done in the assembly
function idmap_cpu_replace_ttbr1(), but it could equally be done in the
only caller of that function, cpu_replace_ttbr1(). This simplifies the
assembly code slightly, and means there is one less piece of assembly to
consider when working with the code around masking DAIF flags.

While we're at it, add a comment which makes explicit why we are masking
DAIF in this code.

There should be no functional effect.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221123180209.634650-2-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/mmu_context.h | 10 ++++++++++
 arch/arm64/mm/proc.S                 |  4 ----
 2 files changed, 10 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3f8b5df0c1f..72dbd6400549 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
@@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
+	unsigned long daif;
 
 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
 	__cpu_install_idmap(idmap);
+
+	/*
+	 * We really don't want to take *any* exceptions while TTBR1 is
+	 * in the process of being replaced so mask everything.
+	 */
+	daif = local_daif_save();
 	replace_phys(ttbr1);
+	local_daif_restore(daif);
+
 	cpu_uninstall_idmap();
 }
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b9ecbbae1e1a..066fa60b93d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -189,16 +189,12 @@
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
 SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
-
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
 
 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb
 
-	restore_daif x2
-
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 .popsection
-- cgit v1.2.3

From d503d01e5016370be8473fc23d800c7ff37ab7f6 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 23 Nov 2022 18:02:09 +0000
Subject: arm64/asm: Remove unused assembler DAIF save/restore macros

There are no longer any users of the assembler macros for saving and
restoring DAIF, so remove them to prevent further users from being added;
there are C equivalents available.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221123180209.634650-3-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 88175551b401..d8d6627be0f6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -34,11 +34,6 @@
 	wx\n	.req	w\n
 	.endr
 
-	.macro save_and_disable_daif, flags
-	mrs	\flags, daif
-	msr	daifset, #0xf
-	.endm
-
 	.macro disable_daif
 	msr	daifset, #0xf
 	.endm
@@ -47,10 +42,6 @@
 	msr	daifclr, #0xf
 	.endm
 
-	.macro	restore_daif, flags:req
-	msr	daif, \flags
-	.endm
-
 /*
  * Save/restore interrupts.
  */
-- cgit v1.2.3
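
The C equivalents referred to by the last two patches are local_daif_save()
and local_daif_restore(), as used in the cpu_replace_ttbr1() hunk above. A
minimal sketch of that usage pattern follows; it is an illustration only,
not part of the series, and do_critical_update() is a hypothetical stand-in
for the real TTBR1 switch:

	#include <asm/daifflags.h>	/* local_daif_save()/local_daif_restore() */

	static void do_critical_update(void);	/* hypothetical critical section */

	static void run_with_daif_masked(void)
	{
		unsigned long daif;

		/* Mask D, A, I and F so no exception can be taken mid-update. */
		daif = local_daif_save();

		do_critical_update();

		/* Restore the DAIF state saved above. */
		local_daif_restore(daif);
	}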