From e8e5104118473ffa23c013da42ae6a9df2867a07 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 19 Oct 2022 13:03:45 +0100
Subject: arm64/asm: Remove unused enable_da macro

We no longer use the enable_da macro; remove it to avoid having to
think about maintaining it.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221019120346.72289-1-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e5957a53be39..27e0c75f7818 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -51,11 +51,6 @@
 	msr	daif, \flags
 	.endm

-	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
-	.macro enable_da
-	msr	daifclr, #(8 | 4)
-	.endm
-
 /*
  * Save/restore interrupts.
  */
--
cgit v1.2.3
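For context on the deleted line: the four-bit immediate to msr daifset/daifclr
selects which of the DAIF exception masks to set or clear, with D in bit 3 down
to F in bit 0, so #(8 | 4) unmasked only Debug and SError while leaving IRQ and
FIQ masked. A hedged C rendering of that encoding (the macro names below are
illustrative, not taken from the kernel tree):

	/* Architectural encoding of the daifset/daifclr immediate. */
	#define DAIF_BIT_D (1 << 3)	/* 8: debug exceptions */
	#define DAIF_BIT_A (1 << 2)	/* 4: SError (asynchronous aborts) */
	#define DAIF_BIT_I (1 << 1)	/* 2: IRQs */
	#define DAIF_BIT_F (1 << 0)	/* 1: FIQs */

	/* The removed macro therefore meant "unmask D and A only":
	 *	msr	daifclr, #(DAIF_BIT_D | DAIF_BIT_A)
	 */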
From 38e4b6605e5cde68ba388f3175e7b4962694674c Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 8 Nov 2022 09:14:06 +0530
Subject: arm64/mm: Drop ARM64_KERNEL_USES_PMD_MAPS

Currently ARM64_KERNEL_USES_PMD_MAPS is an unnecessary abstraction.
Kernel mapping at the PMD (a.k.a. huge page or block) level is only
applicable with 4K base pages, which makes it 2MB aligned, a necessary
requirement for the linear mapping and the physical memory start
address. This can easily be achieved by checking against the base page
size directly. Drop the macro ARM64_KERNEL_USES_PMD_MAPS, which is
redundant.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20221108034406.2950071-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/kernel-pgtable.h | 11 +++--------
 arch/arm64/mm/mmu.c                     |  2 +-
 2 files changed, 4 insertions(+), 9 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 32d14f481f0c..fcd14197756f 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,11 +18,6 @@
  * with 4K (section size = 2M) but not with 16K (section size = 32M) or
  * 64K (section size = 512M).
  */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_KERNEL_USES_PMD_MAPS 1
-#else
-#define ARM64_KERNEL_USES_PMD_MAPS 0
-#endif

 /*
  * The idmap and swapper page tables need some space reserved in the kernel
@@ -34,7 +29,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
  * cases.
  */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #else
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -96,7 +91,7 @@
 #define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)

 /* Initial memory map size */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
 #define SWAPPER_BLOCK_SIZE	PMD_SIZE
 #define SWAPPER_TABLE_SHIFT	PUD_SHIFT
@@ -112,7 +107,7 @@
 #define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
 #else
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9a7c38965154..d386033a074c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1196,7 +1196,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,

 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

-	if (!ARM64_KERNEL_USES_PMD_MAPS)
+	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
 		return vmemmap_populate_basepages(start, end, node, altmap);

 	do {
--
cgit v1.2.3
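The mmu.c hunk also swaps the home-grown 0/1 macro for IS_ENABLED(), which
evaluates a Kconfig symbol inside ordinary C so that both branches are parsed
and type-checked while the dead one is trivially eliminated. A self-contained
sketch of the idiom (CONFIG_DEMO_FEATURE is a stand-in symbol, and the macro
chain is a compressed version of include/linux/kconfig.h that handles only the
=y case):

	#include <stdio.h>

	#define CONFIG_DEMO_FEATURE 1	/* comment out to take the other branch */

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x) ___is_defined(x)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option) __is_defined(option)

	int main(void)
	{
		/* Unlike #ifdef, both arms are visible to the compiler. */
		if (!IS_ENABLED(CONFIG_DEMO_FEATURE))
			printf("base pages only\n");
		else
			printf("block mappings allowed\n");
		return 0;
	}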
From 59598b42eb52c734bd45d00f64a73129a4537e49 Mon Sep 17 00:00:00 2001
From: Mukesh Ojha
Date: Sat, 29 Oct 2022 12:37:48 +0530
Subject: arm64: entry: Fix typo

Fix the following typo in entry-common.c:

  intrumentable => instrumentable

Signed-off-by: Mukesh Ojha
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/1667027268-1255-1-git-send-email-quic_mojha@quicinc.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry-common.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 27369fa1c032..9a553b185e63 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -30,7 +30,7 @@
 /*
  * Handle IRQ/context state management when entering from kernel mode.
  * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  *
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
@@ -63,7 +63,7 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting to kernel mode.
  * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  *
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
@@ -97,7 +97,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering from user mode.
  * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  */
 static __always_inline void __enter_from_user_mode(void)
 {
@@ -116,7 +116,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting to user mode.
  * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
 */
 static __always_inline void __exit_to_user_mode(void)
 {
@@ -152,7 +152,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
@@ -170,7 +170,7 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting an NMI from user/kernel
  * mode. After this function returns it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
@@ -192,7 +192,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering a debug exception from
  * kernel mode. Before this function is called it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 {
@@ -207,7 +207,7 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting a debug exception from
  * kernel mode. After this function returns it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 {
--
cgit v1.2.3

From 1e55b44d9ecd42dee7106ce635ffd855183f5221 Mon Sep 17 00:00:00 2001
From: Usama Arif
Date: Fri, 4 Nov 2022 06:16:59 +0000
Subject: arm64: paravirt: remove conduit check in has_pv_steal_clock

arm_smccc_1_1_invoke(), which is called later in the function, will
return failure if there is no conduit (or the conduit is pre-SMCCC
1.1), so the check is unnecessary.

Suggested-by: Steven Price
Signed-off-by: Usama Arif
Reviewed-by: Steven Price
Link: https://lore.kernel.org/r/20221104061659.4116508-1-usama.arif@bytedance.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/paravirt.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 57c7c211f8c7..aa718d6a9274 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -141,10 +141,6 @@ static bool __init has_pv_steal_clock(void)
 {
 	struct arm_smccc_res res;

-	/* To detect the presence of PV time support we require SMCCC 1.1+ */
-	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
-		return false;
-
 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 			     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
--
cgit v1.2.3
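For background on why the deleted guard was redundant: the 1.1 invoke helper
already degrades gracefully when no conduit is available, reporting "not
supported" through res->a0. A self-contained sketch of that control flow (the
types and names here are simplified stand-ins for include/linux/arm-smccc.h,
not the kernel's definitions):

	#include <stdio.h>

	enum conduit { CONDUIT_NONE, CONDUIT_HVC, CONDUIT_SMC };
	struct smccc_res { long a0; };
	#define RET_NOT_SUPPORTED (-1L)

	/* Mirrors the shape of arm_smccc_1_1_invoke(): pick a conduit,
	 * or fail with NOT_SUPPORTED when there is none. */
	static void invoke_1_1(enum conduit c, struct smccc_res *res)
	{
		switch (c) {
		case CONDUIT_HVC:	/* would issue hvc #0 */
		case CONDUIT_SMC:	/* would issue smc #0 */
			res->a0 = 0;
			break;
		default:		/* no conduit: report failure */
			res->a0 = RET_NOT_SUPPORTED;
		}
	}

	int main(void)
	{
		struct smccc_res res;

		/* has_pv_steal_clock() only inspects res.a0 afterwards, so an
		 * up-front conduit check just duplicated the default branch. */
		invoke_1_1(CONDUIT_NONE, &res);
		printf("steal clock %ssupported\n",
		       res.a0 == RET_NOT_SUPPORTED ? "not " : "");
		return 0;
	}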
From 657eef0a5420a02c02945ed8c87f2ddcbd255772 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 14 Nov 2022 12:54:24 +0000
Subject: arm64: atomics: lse: remove stale dependency on JUMP_LABEL

Currently CONFIG_ARM64_USE_LSE_ATOMICS depends upon CONFIG_JUMP_LABEL,
as the inline atomics were indirected with a static branch. However,
since commit:

  21fb26bfb01ffe0d ("arm64: alternatives: add alternative_has_feature_*()")

... we use an alternative branch (which is always available) rather
than a static branch, and hence the dependency is unnecessary.

Remove the stale dependency, along with the stale include. This will
allow the use of LSE atomics in kernels built with CONFIG_JUMP_LABEL=n,
and reduces the risk of circular header dependencies.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221114125424.2998268-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig           | 1 -
 arch/arm64/include/asm/lse.h | 1 -
 2 files changed, 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 505c8a1ccbe0..6a4704d4e36b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1714,7 +1714,6 @@ config ARM64_LSE_ATOMICS

 config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
-	depends on JUMP_LABEL
 	default y
 	help
 	  As part of the Large System Extensions, ARMv8.1 introduces new
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index c503db8e73b0..f99d74826a7e 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -10,7 +10,6 @@

 #include
 #include
-#include
 #include
 #include
 #include
--
cgit v1.2.3
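The indirection being removed can be hard to picture from the diff alone. A
before/after sketch of the gating helper in asm/lse.h, reconstructed from the
commit message's description; treat the exact names and signatures as an
assumption rather than quoted kernel source, and note it will not compile
outside the kernel tree:

	/* Before 21fb26bfb01ffe0d: a static branch, hence the JUMP_LABEL
	 * dependency (sketch). */
	static __always_inline bool system_uses_lse_atomics(void)
	{
		return static_branch_likely(&arm64_const_caps_ready) &&
		       static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
	}

	/* After: a patched alternative, available in every configuration
	 * regardless of CONFIG_JUMP_LABEL (sketch). */
	static __always_inline bool system_uses_lse_atomics(void)
	{
		return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
	}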
From 9e75e74b07ab17e6407b567eb4a8b3d0c93de5f1 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Mon, 31 Oct 2022 19:24:50 +0000
Subject: arm64/signal: Document our convention for choosing magic numbers

Szabolcs Nagy has pointed out that most of our signal frame magic
numbers are chosen to be meaningful ASCII when dumped, to aid manual
parsing. This seems sensible, since it might help someone parsing
things out; let's document it so that people implementing new signal
contexts are aware of the convention and more likely to follow it.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221031192450.826159-1-broonie@kernel.org
[will: Fixed typo and tweaked wording]
Signed-off-by: Will Deacon
---
 arch/arm64/include/uapi/asm/sigcontext.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 4aaf31e3bf16..9525041e4a14 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -62,6 +62,10 @@ struct sigcontext {
  * context. Such structures must be placed after the rt_sigframe on the stack
  * and be 16-byte aligned. The last structure must be a dummy one with the
  * magic and size set to 0.
+ *
+ * Note that the values allocated for use as magic should be chosen to
+ * be meaningful in ASCII to aid manual parsing, ZA doesn't follow this
+ * convention due to oversight but it should be observed for future additions.
 */
 struct _aarch64_ctx {
 	__u32 magic;
--
cgit v1.2.3
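To see the convention in action: each magic packs three ASCII characters plus a
version byte into the __u32. A standalone decoder illustrating this (the magic
constants are as defined in sigcontext.h at the time of this series, so verify
them against your tree; the helper itself is purely for demonstration):

	#include <stdio.h>

	static void show_magic(const char *name, unsigned int magic)
	{
		printf("%-10s 0x%08x -> \"%c%c%c\" + 0x%02x\n", name, magic,
		       (magic >> 24) & 0xff, (magic >> 16) & 0xff,
		       (magic >> 8) & 0xff, magic & 0xff);
	}

	int main(void)
	{
		show_magic("ESR_MAGIC", 0x45535201);	/* "ESR" + version 1 */
		show_magic("SVE_MAGIC", 0x53564501);	/* "SVE" + version 1 */
		show_magic("ZA_MAGIC",  0x54366345);	/* the noted exception */
		return 0;
	}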
From d8c1d798a2e5091128c391c6dadcc9be334af3f5 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 14 Nov 2022 14:40:42 +0000
Subject: arm64: make is_ttbrX_addr() noinstr-safe

We use is_ttbr0_addr() in noinstr code, but as it's only marked as
inline, it's theoretically possible for the compiler to place it
out-of-line and instrument it, which would be problematic.

Mark is_ttbr0_addr() as __always_inline so that it can safely be used
from noinstr code. For consistency, do the same to is_ttbr1_addr().
Note that while is_ttbr1_addr() calls arch_kasan_reset_tag(), this is
a macro (and its callees are either macros or __always_inline), so
there is no risk of transient instrumentation.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221114144042.3001140-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/processor.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 445aa3af3b76..400f8956328b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -308,13 +308,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 }
 #endif

-static inline bool is_ttbr0_addr(unsigned long addr)
+static __always_inline bool is_ttbr0_addr(unsigned long addr)
 {
 	/* entry assembly clears tags for TTBR0 addrs */
 	return addr < TASK_SIZE;
 }

-static inline bool is_ttbr1_addr(unsigned long addr)
+static __always_inline bool is_ttbr1_addr(unsigned long addr)
 {
 	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
--
cgit v1.2.3
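The hazard being closed here is subtle: plain "inline" is only a hint, so the
compiler may still emit an out-of-line copy, and that copy becomes a real
symbol that instrumentation (ftrace, kprobes, sanitizers) can hook even when
the caller is noinstr. A minimal illustration of the stronger attribute (the
functions are hypothetical; only the attribute spelling follows the kernel's
include/linux/compiler_types.h):

	/* Force inlining even at -O0 or under heavy instrumentation. */
	#define __always_inline inline __attribute__((__always_inline__))

	static __always_inline int addr_is_low_half(unsigned long addr)
	{
		/* Always folded into the caller: no symbol, nothing to hook. */
		return addr < (1UL << 48);
	}

	int noinstr_like_handler(unsigned long addr)
	{
		return addr_is_low_half(addr);
	}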
From 9ed2b4616d4e846ece2a04cb5007ce1d1bd9e3f3 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Fri, 18 Nov 2022 11:01:02 +0530
Subject: arm64/mm: Drop redundant BUG_ON(!pgtable_alloc)

__create_pgd_mapping_locked() expects a page allocator to be used while
mapping a virtual range. This page allocator function propagates down
the call chain while building intermediate levels of the page table.
The passed page allocator is a necessary ingredient for building the
page table, but its presence can be asserted just once at the very
beginning rather than in all the downstream functions. Consolidate the
BUG_ON(!pgtable_alloc) checks into a single place,
i.e. __create_pgd_mapping_locked().

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrew Morton
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20221118053102.500216-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d386033a074c..73a12b3abf82 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -207,7 +207,6 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,

 		if (flags & NO_EXEC_MAPPINGS)
 			pmdval |= PMD_TABLE_PXN;
-		BUG_ON(!pgtable_alloc);
 		pte_phys = pgtable_alloc(PAGE_SHIFT);
 		__pmd_populate(pmdp, pte_phys, pmdval);
 		pmd = READ_ONCE(*pmdp);
@@ -285,7 +284,6 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,

 		if (flags & NO_EXEC_MAPPINGS)
 			pudval |= PUD_TABLE_PXN;
-		BUG_ON(!pgtable_alloc);
 		pmd_phys = pgtable_alloc(PMD_SHIFT);
 		__pud_populate(pudp, pmd_phys, pudval);
 		pud = READ_ONCE(*pudp);
@@ -324,7 +322,6 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,

 		if (flags & NO_EXEC_MAPPINGS)
 			p4dval |= P4D_TABLE_PXN;
-		BUG_ON(!pgtable_alloc);
 		pud_phys = pgtable_alloc(PUD_SHIFT);
 		__p4d_populate(p4dp, pud_phys, p4dval);
 		p4d = READ_ONCE(*p4dp);
@@ -383,6 +380,7 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
 	phys &= PAGE_MASK;
 	addr = virt & PAGE_MASK;
 	end = PAGE_ALIGN(virt + size);
+	BUG_ON(!pgtable_alloc);

 	do {
 		next = pgd_addr_end(addr, end);
--
cgit v1.2.3

From d3d10f0d370c6c83e88cd79e450e70c31c6ae50c Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Wed, 16 Nov 2022 14:13:02 +0530
Subject: arm64/mm: Drop idmap_pg_end[] declaration

idmap_pg_end[] is not used anywhere, hence just drop its declaration.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrew Morton
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20221116084302.320685-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 71a1af42f0e8..3efedacc014f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -609,7 +609,6 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
 extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
--
cgit v1.2.3

From 56eea7f87fb66d7e9508b1b883a602b8df122b03 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 17 Nov 2022 13:16:50 +0000
Subject: arm64: alternatives: make apply_alternatives_vdso() static

We define and use apply_alternatives_vdso() within alternative.c, and
don't provide a prototype in a header. There's no need for it to be
visible outside of alternative.c, so mark it as static.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Joey Gouly
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221117131650.4056636-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/alternative.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 91263d09ea65..0f5eaa4c3a39 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -196,7 +196,7 @@ static void __apply_alternatives(const struct alt_region *region,
 	}
 }

-void apply_alternatives_vdso(void)
+static void apply_alternatives_vdso(void)
 {
 	struct alt_region region;
 	const struct elf64_hdr *hdr;
--
cgit v1.2.3
From 5b468dad6e5cf4998bdc05efbc5526c111666027 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 17 Nov 2022 18:01:44 +0530
Subject: arm64/mm: Drop unused restore_ttbr1

The restore_ttbr1 procedure is not used anywhere, hence just drop it.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: Andrew Morton
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20221117123144.403582-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 11 -----------
 1 file changed, 11 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 27e0c75f7818..88175551b401 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -614,17 +614,6 @@ alternative_endif
 #endif
 	.endm

-/*
- * Perform the reverse of offset_ttbr1.
- * bic is used as it can cover the immediate value and, in future, won't need
- * to be nop'ed out when dealing with 52-bit kernel VAs.
- */
-	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_VA_BITS_52
-	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
-	.endm
-
 /*
  * Arrange a physical address in a TTBR register, taking care of 52-bit
  * addresses.
--
cgit v1.2.3

From 32d495b0c3305546f4773b9aafcd4e90188ddb9e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 21 Nov 2022 12:52:07 +0000
Subject: Revert "arm64/mm: Drop redundant BUG_ON(!pgtable_alloc)"

This reverts commit 9ed2b4616d4e846ece2a04cb5007ce1d1bd9e3f3.

Nathan reports early boot failures bisected to this change which look
related to the kPTI nG repainting. In any case, consolidating the
BUG_ON()s to a single location needs more thought, so revert the change
until this is figured out properly.

Link: https://lore.kernel.org/r/Y3pS5fdZ3MdLZ00t@dev-arch.thelio-3990X
Reported-by: Nathan Chancellor
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 73a12b3abf82..d386033a074c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -207,6 +207,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,

 		if (flags & NO_EXEC_MAPPINGS)
 			pmdval |= PMD_TABLE_PXN;
+		BUG_ON(!pgtable_alloc);
 		pte_phys = pgtable_alloc(PAGE_SHIFT);
 		__pmd_populate(pmdp, pte_phys, pmdval);
 		pmd = READ_ONCE(*pmdp);
@@ -284,6 +285,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,

 		if (flags & NO_EXEC_MAPPINGS)
 			pudval |= PUD_TABLE_PXN;
+		BUG_ON(!pgtable_alloc);
 		pmd_phys = pgtable_alloc(PMD_SHIFT);
 		__pud_populate(pudp, pmd_phys, pudval);
 		pud = READ_ONCE(*pudp);
@@ -322,6 +324,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,

 		if (flags & NO_EXEC_MAPPINGS)
 			p4dval |= P4D_TABLE_PXN;
+		BUG_ON(!pgtable_alloc);
 		pud_phys = pgtable_alloc(PUD_SHIFT);
 		__p4d_populate(p4dp, pud_phys, p4dval);
 		p4d = READ_ONCE(*p4dp);
@@ -380,7 +383,6 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
 	phys &= PAGE_MASK;
 	addr = virt & PAGE_MASK;
 	end = PAGE_ALIGN(virt + size);
-	BUG_ON(!pgtable_alloc);

 	do {
 		next = pgd_addr_end(addr, end);
--
cgit v1.2.3

From a8bf2fc43fc63c0bd38b45c9a9616d43b683585d Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 23 Nov 2022 18:02:08 +0000
Subject: arm64/kpti: Move DAIF masking to C code

We really don't want to take an exception while replacing TTBR1, so we
mask DAIF during the actual update. Currently this is done in the
assembly function idmap_cpu_replace_ttbr1(), but it could equally be
done in the only caller of that function, cpu_replace_ttbr1(). This
simplifies the assembly code slightly and means that, when working on
the code around masking DAIF flags, there is one less piece of assembly
which needs to be considered. While we're at it, add a comment which
makes explicit why we are masking DAIF in this code.

There should be no functional effect.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221123180209.634650-2-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/mmu_context.h | 10 ++++++++++
 arch/arm64/mm/proc.S                 |  4 ----
 2 files changed, 10 insertions(+), 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3f8b5df0c1f..72dbd6400549 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -18,6 +18,7 @@

 #include
 #include
+#include
 #include
 #include
 #include
@@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
+	unsigned long daif;

 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

 	__cpu_install_idmap(idmap);
+
+	/*
+	 * We really don't want to take *any* exceptions while TTBR1 is
+	 * in the process of being replaced so mask everything.
+	 */
+	daif = local_daif_save();
 	replace_phys(ttbr1);
+	local_daif_restore(daif);
+
 	cpu_uninstall_idmap();
 }

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b9ecbbae1e1a..066fa60b93d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -189,16 +189,12 @@ SYM_FUNC_END(cpu_do_resume)
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
 SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
-
 	__idmap_cpu_set_reserved_ttbr1 x1, x3

 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb

-	restore_daif x2
-
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 	.popsection
--
cgit v1.2.3
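The C helpers used above follow the usual save/modify/restore shape. A hedged
sketch of the calling pattern (local_daif_save() and local_daif_restore() are
the real helpers from arch/arm64/include/asm/daifflags.h; the surrounding
function is a made-up consumer for illustration):

	static void update_something_fragile(void)
	{
		unsigned long daif;

		daif = local_daif_save();	/* mask D, A, I and F; return the old mask */
		/* ...no exception of any kind can preempt this window... */
		local_daif_restore(daif);	/* restore exactly what the caller had */
	}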
From d503d01e5016370be8473fc23d800c7ff37ab7f6 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 23 Nov 2022 18:02:09 +0000
Subject: arm64/asm: Remove unused assembler DAIF save/restore macros

There are no longer any users of the assembler macros for saving and
restoring DAIF, so remove them to prevent further users from being
added; C equivalents are available.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221123180209.634650-3-broonie@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 88175551b401..d8d6627be0f6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -34,11 +34,6 @@
 wx\n	.req	w\n
 	.endr

-	.macro	save_and_disable_daif, flags
-	mrs	\flags, daif
-	msr	daifset, #0xf
-	.endm
-
 	.macro	disable_daif
 	msr	daifset, #0xf
 	.endm
@@ -47,10 +42,6 @@
 	msr	daifclr, #0xf
 	.endm

-	.macro	restore_daif, flags:req
-	msr	daif, \flags
-	.endm
-
 /*
  * Save/restore interrupts.
  */
--
cgit v1.2.3

From 67bc5b2d6d4802f46eceda246eb53a672b961831 Mon Sep 17 00:00:00 2001
From: Jisheng Zhang
Date: Sat, 3 Dec 2022 00:18:59 +0800
Subject: arm64: alternatives: add __init/__initconst to some functions/variables

apply_alternatives_vdso(), __apply_alternatives_multi_stop() and
kernel_alternatives are not needed after booting, so mark the two
functions as __init and the variable as __initconst.

Signed-off-by: Jisheng Zhang
Link: https://lore.kernel.org/r/20221202161859.2228-1-jszhang@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/alternative.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 0f5eaa4c3a39..d32d4ed5519b 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -196,7 +196,7 @@ static void __apply_alternatives(const struct alt_region *region,
 	}
 }

-static void apply_alternatives_vdso(void)
+static void __init apply_alternatives_vdso(void)
 {
 	struct alt_region region;
 	const struct elf64_hdr *hdr;
@@ -220,7 +220,7 @@ static void apply_alternatives_vdso(void)
 	__apply_alternatives(&region, false, &all_capabilities[0]);
 }

-static const struct alt_region kernel_alternatives = {
+static const struct alt_region kernel_alternatives __initconst = {
 	.begin	= (struct alt_instr *)__alt_instructions,
 	.end	= (struct alt_instr *)__alt_instructions_end,
 };
@@ -229,7 +229,7 @@ static const struct alt_region kernel_alternatives = {
 /*
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
 */
-static int __apply_alternatives_multi_stop(void *unused)
+static int __init __apply_alternatives_multi_stop(void *unused)
 {
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
--
cgit v1.2.3
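As background for these annotations: __init and __initconst place symbols in
sections that the kernel discards once boot completes, so anything still
referencing them afterwards is a bug (modpost flags such section mismatches at
build time). A compressed sketch of the mechanism (the section names follow
include/linux/init.h in spirit; the sample table and function are
hypothetical):

	#define __init      __attribute__((__section__(".init.text")))
	#define __initconst __attribute__((__section__(".init.rodata")))

	static const int boot_tuning[] __initconst = { 8, 16, 32 };

	static int __init apply_boot_tuning(void)
	{
		/* Safe only during boot; the backing memory is freed later. */
		return boot_tuning[0];
	}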