From 4399e6cdf3e920ab99c5f935ecbae88e60682596 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 31 Jan 2020 17:51:06 -0800 Subject: arm64: fix NUMA Kconfig typos Fix typos in arch/arm64/Kconfig: - spell Numa as NUMA - add hyphenation to Non-Uniform Signed-off-by: Randy Dunlap Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..d86d05db3250 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -952,11 +952,11 @@ config HOTPLUG_CPU # Common NUMA Features config NUMA - bool "Numa Memory Allocation and Scheduler Support" + bool "NUMA Memory Allocation and Scheduler Support" select ACPI_NUMA if ACPI select OF_NUMA help - Enable NUMA (Non Uniform Memory Access) support. + Enable NUMA (Non-Uniform Memory Access) support. The kernel will try to allocate memory used by a CPU on the local memory of the CPU and add some more -- cgit v1.2.3 From 25b92693a1b67a47b0c64a3410009d09e9658412 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 13 Feb 2020 12:14:52 +0000 Subject: arm64: mm: convert cpu_do_switch_mm() to C There's no reason that cpu_do_switch_mm() needs to be written as an assembly function, and having it as a C function would make it easier to maintain. This patch converts cpu_do_switch_mm() to C, removing code that this change makes redundant (e.g. the mmid macro). Since the header comment was stale and the prototype now implies all the necessary information, this comment is removed. The 'pgd_phys' argument is made a phys_addr_t to match the return type of virt_to_phys(). At the same time, post_ttbr_update_workaround() is updated to use IS_ENABLED(), which allows the compiler to figure out it can elide calls for !CONFIG_CAVIUM_ERRATUM_27456 builds. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Will Deacon [catalin.marinas@arm.com: change comments from asm-style to C-style] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 6 ------ arch/arm64/include/asm/mmu_context.h | 2 ++ arch/arm64/include/asm/proc-fns.h | 2 -- arch/arm64/mm/context.c | 32 ++++++++++++++++++++++++++++++-- arch/arm64/mm/proc.S | 28 ---------------------------- 5 files changed, 32 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index aca337d79d12..af03001293c6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -256,12 +256,6 @@ alternative_endif ldr \rd, [\rn, #VMA_VM_MM] .endm -/* - * mmid - get context id from mm pointer (mm->context.id) - */ - .macro mmid, rd, rn - ldr \rd, [\rn, #MM_CONTEXT_ID] - .endm /* * read_ctr - read CTR_EL0. 
If the system has mismatched register fields, * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3827ff4040a3..ab46187c6300 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -46,6 +46,8 @@ static inline void cpu_set_reserved_ttbr0(void) isb(); } +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); + static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) { BUG_ON(pgd == swapper_pg_dir); diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index a2ce65a0c1fa..0d5d1f0525eb 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -13,11 +13,9 @@ #include -struct mm_struct; struct cpu_suspend_ctx; extern void cpu_do_idle(void); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8ef73e89d514..8524f03d629c 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -6,6 +6,7 @@ * Copyright (C) 2012 ARM Ltd. */ +#include #include #include #include @@ -254,10 +255,37 @@ switch_mm_fastpath: /* Errata workaround post TTBRx_EL1 update. */ asmlinkage void post_ttbr_update_workaround(void) { + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) + return; + asm(ALTERNATIVE("nop; nop; nop", "ic iallu; dsb nsh; isb", - ARM64_WORKAROUND_CAVIUM_27456, - CONFIG_CAVIUM_ERRATUM_27456)); + ARM64_WORKAROUND_CAVIUM_27456)); +} + +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm) +{ + unsigned long ttbr1 = read_sysreg(ttbr1_el1); + unsigned long asid = ASID(mm); + unsigned long ttbr0 = phys_to_ttbr(pgd_phys); + + /* Skip CNP for the reserved ASID */ + if (system_supports_cnp() && asid) + ttbr0 |= TTBR_CNP_BIT; + + /* SW PAN needs a copy of the ASID in TTBR0 for entry */ + if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN)) + ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + /* Set ASID in TTBR1 since TCR.A1 is set */ + ttbr1 &= ~TTBR_ASID_MASK; + ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + write_sysreg(ttbr1, ttbr1_el1); + isb(); + write_sysreg(ttbr0, ttbr0_el1); + isb(); + post_ttbr_update_workaround(); } static int asids_init(void) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..76899c6eee2b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -142,34 +142,6 @@ SYM_FUNC_END(cpu_do_resume) .popsection #endif -/* - * cpu_do_switch_mm(pgd_phys, tsk) - * - * Set the translation table base pointer to be pgd_phys. - * - * - pgd_phys - physical address of new TTB - */ -SYM_FUNC_START(cpu_do_switch_mm) - mrs x2, ttbr1_el1 - mmid x1, x1 // get mm->context.id - phys_to_ttbr x3, x0 - -alternative_if ARM64_HAS_CNP - cbz x1, 1f // skip CNP for reserved ASID - orr x3, x3, #TTBR_CNP_BIT -1: -alternative_else_nop_endif -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - bfi x3, x1, #48, #16 // set the ASID field in TTBR0 -#endif - bfi x2, x1, #48, #16 // set the ASID - msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) - isb - msr ttbr0_el1, x3 // now update TTBR0 - isb - b post_ttbr_update_workaround // Back to C code... 
-SYM_FUNC_END(cpu_do_switch_mm) - .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 -- cgit v1.2.3 From 90765f745b08fdf069e4562f6985bdba0fefad8d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 28 Feb 2020 12:43:55 +0000 Subject: arm64: Update comment for ASID() macro Commit 25b92693a1b6 ("arm64: mm: convert cpu_do_switch_mm() to C") added a new use of the ASID() macro, so update the comment in asm/mmu.h which reasons about why an atomic reload of 'mm->context.id.counter' is not required. Cc: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/mmu.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e4d862420bb4..21a4bcfdb378 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -23,9 +23,9 @@ typedef struct { } mm_context_t; /* - * This macro is only used by the TLBI code, which cannot race with an - * ASID change and therefore doesn't need to reload the counter using - * atomic64_read. + * This macro is only used by the TLBI and low-level switch_mm() code, + * neither of which can race with an ASID change. We therefore don't + * need to reload the counter using atomic64_read(). */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -- cgit v1.2.3 From e424b17985262359181cce1553a1ea2cdf263941 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 21 Feb 2020 19:35:31 +0000 Subject: arm64: perf: Refactor PMU init callbacks The PMU init callbacks are already drowning in boilerplate, so before doubling the number of supported PMU models, give it a sensible refactor to significantly reduce the bloat, both in source and object code. Although nobody uses non-default sysfs attributes today, there's minimal impact to preserving the notion that maybe, some day, somebody might, so we may as well keep up appearances. Acked-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 124 +++++++++-------------------------------- 1 file changed, 27 insertions(+), 97 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index e40b65645c86..1e0b04da2f3a 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -953,7 +953,10 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) return probe.present ? 0 : -ENODEV; } -static int armv8_pmu_init(struct arm_pmu *cpu_pmu) +static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, + int (*map_event)(struct perf_event *event), + const struct attribute_group *events, + const struct attribute_group *format) { int ret = armv8pmu_probe_pmu(cpu_pmu); if (ret) @@ -972,135 +975,62 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->set_event_filter = armv8pmu_set_event_filter; cpu_pmu->filter_match = armv8pmu_filter_match; + cpu_pmu->name = name; + cpu_pmu->map_event = map_event; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ? + events : &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ? 
+ format : &armv8_pmuv3_format_attr_group; + return 0; } static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_pmuv3"; - cpu_pmu->map_event = armv8_pmuv3_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_pmuv3", + armv8_pmuv3_map_event, NULL, NULL); } static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a35"; - cpu_pmu->map_event = armv8_a53_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", + armv8_a53_map_event, NULL, NULL); } static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a53"; - cpu_pmu->map_event = armv8_a53_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", + armv8_a53_map_event, NULL, NULL); } static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a57"; - cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", + armv8_a57_map_event, NULL, NULL); } static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a72"; - cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", + armv8_a57_map_event, NULL, NULL); } static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a73"; - cpu_pmu->map_event = armv8_a73_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", + armv8_a73_map_event, NULL, NULL); } static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cavium_thunder"; - cpu_pmu->map_event = armv8_thunder_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", + armv8_thunder_map_event, NULL, NULL); } static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - 
cpu_pmu->name = "armv8_brcm_vulcan"; - cpu_pmu->map_event = armv8_vulcan_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", + armv8_vulcan_map_event, NULL, NULL); } static const struct of_device_id armv8_pmu_of_device_ids[] = { -- cgit v1.2.3 From 29cc4ceeac1274ab8363a11b81ebd99f3b023985 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 21 Feb 2020 19:35:32 +0000 Subject: arm64: perf: Support new DT compatibles Add support for matching the new PMUs. For now, this just wires them up as generic PMUv3 such that people writing DTs for new SoCs can do the right thing, and at least have architectural and raw events be usable. We can come back and fill in event maps for sysfs and/or perf tools at a later date. Acked-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 56 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1e0b04da2f3a..726cd8bda025 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -991,6 +991,12 @@ static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) armv8_pmuv3_map_event, NULL, NULL); } +static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", @@ -1003,12 +1009,24 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) armv8_a53_map_event, NULL, NULL); } +static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event, NULL, NULL); } +static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", @@ -1021,6 +1039,36 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) armv8_a73_map_event, NULL, NULL); } +static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", @@ -1035,11 +1083,19 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = 
"arm,armv8-pmuv3", .data = armv8_pmuv3_init}, + {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init}, {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, + {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init}, {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, + {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init}, {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, + {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, + {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, + {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, + {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, + {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, {}, -- cgit v1.2.3 From bf2b59f60ee1fefa768d62ec6e8f4b4d9e04c691 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 4 Mar 2020 09:58:42 +0530 Subject: arm64/mm: Hold memory hotplug lock while walking for kernel page table dump The arm64 page table dump code can race with concurrent modification of the kernel page tables. When a leaf entries are modified concurrently, the dump code may log stale or inconsistent information for a VA range, but this is otherwise not harmful. When intermediate levels of table are freed, the dump code will continue to use memory which has been freed and potentially reallocated for another purpose. In such cases, the dump code may dereference bogus addresses, leading to a number of potential problems. Intermediate levels of table may by freed during memory hot-remove, which will be enabled by a subsequent patch. To avoid racing with this, take the memory hotplug lock when walking the kernel page table. Acked-by: David Hildenbrand Acked-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Steven Price Signed-off-by: Anshuman Khandual Signed-off-by: Catalin Marinas --- arch/arm64/mm/ptdump_debugfs.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 1f2eae3e988b..d29d722ec3ec 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include @@ -7,7 +8,10 @@ static int ptdump_show(struct seq_file *m, void *v) { struct ptdump_info *info = m->private; + + get_online_mems(); ptdump_walk(m, info); + put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); -- cgit v1.2.3 From bbd6ec605c0fc286c3f8ce60b4ed44635361d58b Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 4 Mar 2020 09:58:43 +0530 Subject: arm64/mm: Enable memory hot remove The arch code for hot-remove must tear down portions of the linear map and vmemmap corresponding to memory being removed. In both cases the page tables mapping these regions must be freed, and when sparse vmemmap is in use the memory backing the vmemmap must also be freed. This patch adds unmap_hotplug_range() and free_empty_tables() helpers which can be used to tear down either region and calls it from vmemmap_free() and ___remove_pgd_mapping(). The free_mapped argument determines whether the backing memory will be freed. It makes two distinct passes over the kernel page table. 
In the first pass with unmap_hotplug_range() it unmaps, invalidates applicable TLB cache and frees backing memory if required (vmemmap) for each mapped leaf entry. In the second pass with free_empty_tables() it looks for empty page table sections whose page table page can be unmapped, TLB invalidated and freed. While freeing intermediate level page table pages bail out if any of its entries are still valid. This can happen for partially filled kernel page table either from a previously attempted failed memory hot add or while removing an address range which does not span the entire page table page range. The vmemmap region may share levels of table with the vmalloc region. There can be conflicts between hot remove freeing page table pages with a concurrent vmalloc() walking the kernel page table. This conflict can not just be solved by taking the init_mm ptl because of existing locking scheme in vmalloc(). So free_empty_tables() implements a floor and ceiling method which is borrowed from user page table tear with free_pgd_range() which skips freeing page table pages if intermediate address range is not aligned or maximum floor-ceiling might not own the entire page table page. Boot memory on arm64 cannot be removed. Hence this registers a new memory hotplug notifier which prevents boot memory offlining and it's removal. While here update arch_add_memory() to handle __add_pages() failures by just unmapping recently added kernel linear mapping. Now enable memory hot remove on arm64 platforms by default with ARCH_ENABLE_MEMORY_HOTREMOVE. This implementation is overall inspired from kernel page table tear down procedure on X86 architecture and user page table tear down method. [Mike and Catalin added P4D page table level support] Reviewed-by: Catalin Marinas Signed-off-by: Mike Rapoport Signed-off-by: Catalin Marinas Signed-off-by: Anshuman Khandual Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 + arch/arm64/include/asm/memory.h | 1 + arch/arm64/mm/mmu.c | 379 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 374 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..8fb0ba221a26 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -281,6 +281,9 @@ config ZONE_DMA32 config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y +config ARCH_ENABLE_MEMORY_HOTREMOVE + def_bool y + config SMP def_bool y diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 4d94676e5a8b..2be67b232499 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -54,6 +54,7 @@ #define MODULES_VADDR (BPF_JIT_REGION_END) #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) +#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 128f70852bf3..9b08f7c7e6f0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -724,6 +725,312 @@ int kern_addr_valid(unsigned long addr) return pfn_valid(pte_pfn(pte)); } + +#ifdef CONFIG_MEMORY_HOTPLUG +static void free_hotplug_page_range(struct page *page, size_t size) +{ + WARN_ON(PageReserved(page)); + free_pages((unsigned long)page_address(page), get_order(size)); +} + +static void free_hotplug_pgtable_page(struct page *page) +{ + 
free_hotplug_page_range(page, PAGE_SIZE); +} + +static bool pgtable_range_aligned(unsigned long start, unsigned long end, + unsigned long floor, unsigned long ceiling, + unsigned long mask) +{ + start &= mask; + if (start < floor) + return false; + + if (ceiling) { + ceiling &= mask; + if (!ceiling) + return false; + } + + if (end - 1 > ceiling - 1) + return false; + return true; +} + +static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, + unsigned long end, bool free_mapped) +{ + pte_t *ptep, pte; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + if (pte_none(pte)) + continue; + + WARN_ON(!pte_present(pte)); + pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pte_page(pte), PAGE_SIZE); + } while (addr += PAGE_SIZE, addr < end); +} + +static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, + unsigned long end, bool free_mapped) +{ + unsigned long next; + pmd_t *pmdp, pmd; + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + if (pmd_sect(pmd)) { + pmd_clear(pmdp); + + /* + * One TLBI should be sufficient here as the PMD_SIZE + * range is mapped with a single block entry. + */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pmd_page(pmd), + PMD_SIZE); + continue; + } + WARN_ON(!pmd_table(pmd)); + unmap_hotplug_pte_range(pmdp, addr, next, free_mapped); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, + unsigned long end, bool free_mapped) +{ + unsigned long next; + pud_t *pudp, pud; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + if (pud_sect(pud)) { + pud_clear(pudp); + + /* + * One TLBI should be sufficient here as the PUD_SIZE + * range is mapped with a single block entry. 
+ */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pud_page(pud), + PUD_SIZE); + continue; + } + WARN_ON(!pud_table(pud)); + unmap_hotplug_pmd_range(pudp, addr, next, free_mapped); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, + unsigned long end, bool free_mapped) +{ + unsigned long next; + p4d_t *p4dp, p4d; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + unmap_hotplug_pud_range(p4dp, addr, next, free_mapped); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_range(unsigned long addr, unsigned long end, + bool free_mapped) +{ + unsigned long next; + pgd_t *pgdp, pgd; + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped); + } while (addr = next, addr < end); +} + +static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pte_t *ptep, pte; + unsigned long i, start = addr; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + /* + * This is just a sanity check here which verifies that + * pte clearing has been done by earlier unmap loops. + */ + WARN_ON(!pte_none(pte)); + } while (addr += PAGE_SIZE, addr < end); + + if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) + return; + + /* + * Check whether we can free the pte page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. + */ + ptep = pte_offset_kernel(pmdp, 0UL); + for (i = 0; i < PTRS_PER_PTE; i++) { + if (!pte_none(READ_ONCE(ptep[i]))) + return; + } + + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(ptep)); +} + +static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pmd_t *pmdp, pmd; + unsigned long i, next, start = addr; + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd)); + free_empty_pte_table(pmdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); + + if (CONFIG_PGTABLE_LEVELS <= 2) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) + return; + + /* + * Check whether we can free the pmd page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. 
+ */ + pmdp = pmd_offset(pudp, 0UL); + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(READ_ONCE(pmdp[i]))) + return; + } + + pud_clear(pudp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pmdp)); +} + +static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pud_t *pudp, pud; + unsigned long i, next, start = addr; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud)); + free_empty_pmd_table(pudp, addr, next, floor, ceiling); + } while (addr = next, addr < end); + + if (CONFIG_PGTABLE_LEVELS <= 3) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) + return; + + /* + * Check whether we can free the pud page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. + */ + pudp = pud_offset(p4dp, 0UL); + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_none(READ_ONCE(pudp[i]))) + return; + } + + p4d_clear(p4dp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pudp)); +} + +static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + unsigned long next; + p4d_t *p4dp, p4d; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + free_empty_pud_table(p4dp, addr, next, floor, ceiling); + } while (addr = next, addr < end); +} + +static void free_empty_tables(unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + unsigned long next; + pgd_t *pgdp, pgd; + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + free_empty_p4d_table(pgdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); +} +#endif + #ifdef CONFIG_SPARSEMEM_VMEMMAP #if !ARM64_SWAPPER_USES_SECTION_MAPS int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, @@ -771,6 +1078,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { +#ifdef CONFIG_MEMORY_HOTPLUG + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + + unmap_hotplug_range(start, end, true); + free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); +#endif } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ @@ -1049,10 +1362,21 @@ int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) } #ifdef CONFIG_MEMORY_HOTPLUG +static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) +{ + unsigned long end = start + size; + + WARN_ON(pgdir != init_mm.pgd); + WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); + + unmap_hotplug_range(start, end, false); + free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); +} + int arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions) { - int flags = 0; + int ret, flags = 0; if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; @@ -1062,22 +1386,59 @@ int arch_add_memory(int nid, u64 start, u64 size, memblock_clear_nomap(start, size); - return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, + ret 
= __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, restrictions); + if (ret) + __remove_pgd_mapping(swapper_pg_dir, + __phys_to_virt(start), size); + return ret; } + void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - /* - * FIXME: Cleanup page tables (also in arch_add_memory() in case - * adding fails). Until then, this function should only be used - * during memory hotplug (adding memory), not for memory - * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be - * unlocked yet. - */ __remove_pages(start_pfn, nr_pages, altmap); + __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); +} + +/* + * This memory hotplug notifier helps prevent boot memory from being + * inadvertently removed as it blocks pfn range offlining process in + * __offline_pages(). Hence this prevents both offlining as well as + * removal process for boot memory which is initially always online. + * In future if and when boot memory could be removed, this notifier + * should be dropped and free_hotplug_page_range() should handle any + * reserved pages allocated during boot. + */ +static int prevent_bootmem_remove_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct mem_section *ms; + struct memory_notify *arg = data; + unsigned long end_pfn = arg->start_pfn + arg->nr_pages; + unsigned long pfn = arg->start_pfn; + + if (action != MEM_GOING_OFFLINE) + return NOTIFY_OK; + + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + ms = __pfn_to_section(pfn); + if (early_section(ms)) + return NOTIFY_BAD; + } + return NOTIFY_OK; +} + +static struct notifier_block prevent_bootmem_remove_nb = { + .notifier_call = prevent_bootmem_remove_notifier, +}; + +static int __init prevent_bootmem_remove_init(void) +{ + return register_memory_notifier(&prevent_bootmem_remove_nb); } +device_initcall(prevent_bootmem_remove_init); #endif -- cgit v1.2.3 From 857a141d7fb74237a7a47be40f18e477027f1457 Mon Sep 17 00:00:00 2001 From: Remi Denis-Courmont Date: Wed, 4 Mar 2020 11:35:46 +0200 Subject: arm64: remove gratuitious/stray .ltorg stanzas There are no applicable literals above them. Acked-by: Mark Rutland Signed-off-by: Remi Denis-Courmont Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 1 - arch/arm64/kernel/hibernate-asm.S | 2 -- 2 files changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..f79023c9b374 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -404,7 +404,6 @@ __create_page_tables: ret x28 ENDPROC(__create_page_tables) - .ltorg /* * The following fragment of code is executed with the MMU enabled. diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 38bcd4d4e43b..6532105b3e32 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -110,8 +110,6 @@ ENTRY(swsusp_arch_suspend_exit) cbz x24, 3f /* Do we need to re-initialise EL2? */ hvc #0 3: ret - - .ltorg ENDPROC(swsusp_arch_suspend_exit) /* -- cgit v1.2.3 From 2c9d45b43c39e26fd2a73f2203321cdaee98b58b Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:21 +0000 Subject: arm64: add support for the AMU extension v1 The activity monitors extension is an optional extension introduced by the ARMv8.4 CPU architecture. This implements basic support for version 1 of the activity monitors architecture, AMUv1. 
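For orientation, a minimal sketch of how the fixed counters introduced here can be read on a CPU that advertises the extension — the helper name below is made up; only cpu_has_amu_feat(), read_sysreg_s() and the SYS_AMEVCNTR0_* encodings come from this series:

	#include <linux/smp.h>
	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	/*
	 * Illustrative only, not part of this patch: sample the fixed
	 * core-cycle counter on the current CPU, provided that CPU was
	 * flagged as implementing AMUv1 during feature detection.
	 */
	static u64 sample_core_cycles(void)
	{
		if (!cpu_has_amu_feat(smp_processor_id()))
			return 0;
		return read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	}
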
This support includes: - Extension detection on each CPU (boot, secondary, hotplugged) - Register interface for AMU aarch64 registers Signed-off-by: Ionela Voinescu Reviewed-by: Valentin Schneider Cc: Mark Rutland Cc: Marc Zyngier Cc: Suzuki K Poulose Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 27 +++++++++++++++ arch/arm64/include/asm/cpucaps.h | 3 +- arch/arm64/include/asm/cpufeature.h | 5 +++ arch/arm64/include/asm/sysreg.h | 38 +++++++++++++++++++++ arch/arm64/kernel/cpufeature.c | 66 +++++++++++++++++++++++++++++++++++++ 5 files changed, 138 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..fa4e3737149c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1517,6 +1517,33 @@ config ARM64_PTR_AUTH endmenu +menu "ARMv8.4 architectural features" + +config ARM64_AMU_EXTN + bool "Enable support for the Activity Monitors Unit CPU extension" + default y + help + The activity monitors extension is an optional extension introduced + by the ARMv8.4 CPU architecture. This enables support for version 1 + of the activity monitors architecture, AMUv1. + + To enable the use of this extension on CPUs that implement it, say Y. + + Note that for architectural reasons, firmware _must_ implement AMU + support when running on CPUs that present the activity monitors + extension. The required support is present in: + * Version 1.5 and later of the ARM Trusted Firmware + + For kernels that have this configuration enabled but boot with broken + firmware, you may need to say N here until the firmware is fixed. + Otherwise you may experience firmware panics or lockups when + accessing the counter registers. Even if you are not observing these + symptoms, the values returned by the register reads might not + correctly reflect reality. Most commonly, the value read will be 0, + indicating that the counter is not enabled. 
+ +endmenu + menu "ARMv8.5 architectural features" config ARM64_E0PD diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 865e0253fc1e..185e44aa2713 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -58,7 +58,8 @@ #define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 #define ARM64_HAS_E0PD 49 #define ARM64_HAS_RNG 50 +#define ARM64_HAS_AMU_EXTN 51 -#define ARM64_NCAPS 51 +#define ARM64_NCAPS 52 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..485e069d8768 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -678,6 +678,11 @@ static inline bool cpu_has_hw_af(void) ID_AA64MMFR1_HADBS_SHIFT); } +#ifdef CONFIG_ARM64_AMU_EXTN +/* Check whether the cpu supports the Activity Monitors Unit (AMU) */ +extern bool cpu_has_amu_feat(int cpu); +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b91570ff9db1..085d248f824e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -386,6 +386,42 @@ #define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) #define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) +/* Definitions for system register interface to AMU for ARMv8.4 onwards */ +#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2)) +#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0) +#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1) +#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2) +#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3) +#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4) +#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5) +#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0) +#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1) + +/* + * Group 0 of activity monitors (architected): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 010:n<3> n<2:0> + * Type: 11 011 1101 011:n<3> n<2:0> + * n: 0-15 + * + * Group 1 of activity monitors (auxiliary): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 110:n<3> n<2:0> + * Type: 11 011 1101 111:n<3> n<2:0> + * n: 0-15 + */ + +#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPE0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7) +#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPE1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7) + +/* AMU v1: Fixed (architecturally defined) activity monitors */ +#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0) +#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1) +#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2) +#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3) + #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) #define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) @@ -598,6 +634,7 @@ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_AMU_SHIFT 44 #define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_GIC_SHIFT 24 @@ -608,6 +645,7 @@ #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#define ID_AA64PFR0_AMU 0x1 #define ID_AA64PFR0_SVE 0x1 #define ID_AA64PFR0_RAS_V1 0x1 #define ID_AA64PFR0_FP_NI 0xf diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..301538d3a197 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -163,6 +163,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 
ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), @@ -1222,6 +1223,53 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, #endif +#ifdef CONFIG_ARM64_AMU_EXTN + +/* + * The "amu_cpus" cpumask only signals that the CPU implementation for the + * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide + * information regarding all the events that it supports. When a CPU bit is + * set in the cpumask, the user of this feature can only rely on the presence + * of the 4 fixed counters for that CPU. But this does not guarantee that the + * counters are enabled or access to these counters is enabled by code + * executed at higher exception levels (firmware). + */ +static struct cpumask amu_cpus __read_mostly; + +bool cpu_has_amu_feat(int cpu) +{ + return cpumask_test_cpu(cpu, &amu_cpus); +} + +static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) +{ + if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { + pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", + smp_processor_id()); + cpumask_set_cpu(smp_processor_id(), &amu_cpus); + } +} + +static bool has_amu(const struct arm64_cpu_capabilities *cap, + int __unused) +{ + /* + * The AMU extension is a non-conflicting feature: the kernel can + * safely run a mix of CPUs with and without support for the + * activity monitors extension. Therefore, unconditionally enable + * the capability to allow any late CPU to use the feature. + * + * With this feature unconditionally enabled, the cpu_enable + * function will be called for all CPUs that match the criteria, + * including secondary and hotplugged, marking this feature as + * present on that respective CPU. The enable function will also + * print a detection message. + */ + + return true; +} +#endif + #ifdef CONFIG_ARM64_VHE static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) { @@ -1499,6 +1547,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ +#ifdef CONFIG_ARM64_AMU_EXTN + { + /* + * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y. + * Therefore, don't provide .desc as we don't want the detection + * message to be shown until at least one CPU is detected to + * support the feature. + */ + .capability = ARM64_HAS_AMU_EXTN, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_amu, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR0_AMU_SHIFT, + .min_field_value = ID_AA64PFR0_AMU, + .cpu_enable = cpu_amu_enable, + }, +#endif /* CONFIG_ARM64_AMU_EXTN */ { .desc = "Data cache clean to the PoU not required for I/D coherence", .capability = ARM64_HAS_CACHE_IDC, -- cgit v1.2.3 From 87a1f063464afd934f0f22aac710ca65bef77af3 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:22 +0000 Subject: arm64: trap to EL1 accesses to AMU counters from EL0 The activity monitors extension is an optional extension introduced by the ARMv8.4 CPU architecture. 
In order to access the activity monitors counters safely, if desired, the kernel should detect the presence of the extension through the feature register, and mediate the access. Therefore, disable direct accesses to activity monitors counters from EL0 (userspace) and trap them to EL1 (kernel). To be noted that the ARM64_AMU_EXTN kernel config does not have an effect on this code. Given that the amuserenr_el0 resets to an UNKNOWN value, setting the trap of EL0 accesses to EL1 is always attempted for safety and security considerations. Therefore firmware should still ensure accesses to AMU registers are not trapped in EL2/EL3 as this code cannot be bypassed if the CPU implements the Activity Monitors Unit. Signed-off-by: Ionela Voinescu Reviewed-by: James Morse Reviewed-by: Valentin Schneider Reviewed-by: Suzuki K Poulose Cc: Steve Capper Cc: Mark Rutland Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 10 ++++++++++ arch/arm64/mm/proc.S | 3 +++ 2 files changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index aca337d79d12..c5487806273f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -430,6 +430,16 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU 9000: .endm +/* + * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present + */ + .macro reset_amuserenr_el0, tmpreg + mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1 + ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4 + cbz \tmpreg, .Lskip_\@ // Skip if no AMU present + msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0 +.Lskip_\@: + .endm /* * copy_page - copy src to dest using temp registers t1-t8 */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..7103027b4e64 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -131,6 +131,7 @@ alternative_endif ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr @@ -423,6 +424,8 @@ SYM_FUNC_START(__cpu_setup) isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 + /* * Memory region attributes */ -- cgit v1.2.3 From 4fcdf106a4330bb5c2306a1efbb3af3b7c0db537 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:23 +0000 Subject: arm64/kvm: disable access to AMU registers from kvm guests Access to the AMU counters should be disabled by default in kvm guests, as information from the counters might reveal activity in other guests or activity on the host. Therefore, disable access to AMU registers from EL0 and EL1 in kvm guests by: - Hiding the presence of the extension in the feature register (SYS_ID_AA64PFR0_EL1) on the VCPU. - Disabling access to the AMU registers before switching to the guest. - Trapping accesses and injecting an undefined instruction into the guest. 
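Concretely, a guest that tries to read one of these registers (for example AMCNTENSET0_EL0 or one of the AMEVCNTR<n>_EL0 counters) now traps to the host, and the access_amu() handler below responds by injecting an undefined instruction exception rather than emulating the register.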
Signed-off-by: Ionela Voinescu Reviewed-by: Suzuki K Poulose Reviewed-by: Valentin Schneider Acked-by: Marc Zyngier Cc: Will Deacon Cc: Catalin Marinas Cc: Suzuki K Poulose Cc: Julien Thierry Cc: James Morse Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/kvm/hyp/switch.c | 14 +++++- arch/arm64/kvm/sys_regs.c | 93 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 105 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 6e5d839f42b5..51c1d9918999 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -267,6 +267,7 @@ /* Hyp Coprocessor Trap Register */ #define CPTR_EL2_TCPAC (1 << 31) +#define CPTR_EL2_TAM (1 << 30) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index dfe8dd172512..46292a370781 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -98,6 +98,18 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; val &= ~CPACR_EL1_ZEN; + + /* + * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to + * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2, + * except for some missing controls, such as TAM. + * In this case, CPTR_EL2.TAM has the same position with or without + * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM + * shift value for trapping the AMU accesses. + */ + + val |= CPTR_EL2_TAM; + if (update_fp_enabled(vcpu)) { if (vcpu_has_sve(vcpu)) val |= CPACR_EL1_ZEN; @@ -119,7 +131,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TZ; + val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM; if (!update_fp_enabled(vcpu)) { val |= CPTR_EL2_TFP; __activate_traps_fpsimd32(vcpu); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3e909b117f0c..44354c812783 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1003,6 +1003,20 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } +static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + kvm_inject_undefined(vcpu); + + return false; +} + +/* Macro to expand the AMU counter and type registers*/ +#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu } +#define AMU_AMEVTYPE0_EL0(n) { SYS_DESC(SYS_AMEVTYPE0_EL0(n)), access_amu } +#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu } +#define AMU_AMEVTYPE1_EL0(n) { SYS_DESC(SYS_AMEVTYPE1_EL0(n)), access_amu } + static bool trap_ptrauth(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) @@ -1078,8 +1092,10 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + if (id == SYS_ID_AA64PFR0_EL1) { + if (!vcpu_has_sve(vcpu)) + val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | (0xfUL << ID_AA64ISAR1_API_SHIFT) | @@ -1565,6 +1581,79 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, + { SYS_DESC(SYS_AMCR_EL0), access_amu }, + { SYS_DESC(SYS_AMCFGR_EL0), access_amu }, + { SYS_DESC(SYS_AMCGCR_EL0), access_amu }, + { SYS_DESC(SYS_AMUSERENR_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu }, + AMU_AMEVCNTR0_EL0(0), + AMU_AMEVCNTR0_EL0(1), + AMU_AMEVCNTR0_EL0(2), + AMU_AMEVCNTR0_EL0(3), + AMU_AMEVCNTR0_EL0(4), + AMU_AMEVCNTR0_EL0(5), + AMU_AMEVCNTR0_EL0(6), + AMU_AMEVCNTR0_EL0(7), + AMU_AMEVCNTR0_EL0(8), + AMU_AMEVCNTR0_EL0(9), + AMU_AMEVCNTR0_EL0(10), + AMU_AMEVCNTR0_EL0(11), + AMU_AMEVCNTR0_EL0(12), + AMU_AMEVCNTR0_EL0(13), + AMU_AMEVCNTR0_EL0(14), + AMU_AMEVCNTR0_EL0(15), + AMU_AMEVTYPE0_EL0(0), + AMU_AMEVTYPE0_EL0(1), + AMU_AMEVTYPE0_EL0(2), + AMU_AMEVTYPE0_EL0(3), + AMU_AMEVTYPE0_EL0(4), + AMU_AMEVTYPE0_EL0(5), + AMU_AMEVTYPE0_EL0(6), + AMU_AMEVTYPE0_EL0(7), + AMU_AMEVTYPE0_EL0(8), + AMU_AMEVTYPE0_EL0(9), + AMU_AMEVTYPE0_EL0(10), + AMU_AMEVTYPE0_EL0(11), + AMU_AMEVTYPE0_EL0(12), + AMU_AMEVTYPE0_EL0(13), + AMU_AMEVTYPE0_EL0(14), + AMU_AMEVTYPE0_EL0(15), + AMU_AMEVCNTR1_EL0(0), + AMU_AMEVCNTR1_EL0(1), + AMU_AMEVCNTR1_EL0(2), + AMU_AMEVCNTR1_EL0(3), + AMU_AMEVCNTR1_EL0(4), + AMU_AMEVCNTR1_EL0(5), + AMU_AMEVCNTR1_EL0(6), + AMU_AMEVCNTR1_EL0(7), + AMU_AMEVCNTR1_EL0(8), + AMU_AMEVCNTR1_EL0(9), + AMU_AMEVCNTR1_EL0(10), + AMU_AMEVCNTR1_EL0(11), + AMU_AMEVCNTR1_EL0(12), + AMU_AMEVCNTR1_EL0(13), + AMU_AMEVCNTR1_EL0(14), + AMU_AMEVCNTR1_EL0(15), + AMU_AMEVTYPE1_EL0(0), + AMU_AMEVTYPE1_EL0(1), + AMU_AMEVTYPE1_EL0(2), + AMU_AMEVTYPE1_EL0(3), + AMU_AMEVTYPE1_EL0(4), + AMU_AMEVTYPE1_EL0(5), + AMU_AMEVTYPE1_EL0(6), + AMU_AMEVTYPE1_EL0(7), + AMU_AMEVTYPE1_EL0(8), + AMU_AMEVTYPE1_EL0(9), + AMU_AMEVTYPE1_EL0(10), + AMU_AMEVTYPE1_EL0(11), + AMU_AMEVTYPE1_EL0(12), + AMU_AMEVTYPE1_EL0(13), + AMU_AMEVTYPE1_EL0(14), + AMU_AMEVTYPE1_EL0(15), + { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, -- cgit v1.2.3 From cd0ed03a8903a0b0c6fc36e32d133d1ddfe70cd6 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:26 +0000 Subject: arm64: use activity monitors for frequency invariance The Frequency Invariance Engine (FIE) is providing a frequency scaling correction factor that helps achieve more accurate load-tracking. So far, for arm and arm64 platforms, this scale factor has been obtained based on the ratio between the current frequency and the maximum supported frequency recorded by the cpufreq policy. The setting of this scale factor is triggered from cpufreq drivers by calling arch_set_freq_scale. The current frequency used in computation is the frequency requested by a governor, but it may not be the frequency that was implemented by the platform. 
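As a worked illustration of that existing cpufreq-based scheme (the frequencies are made up for the example):

	/*
	 * A governor request of 1200 MHz on a policy whose cpuinfo_max_freq
	 * is 2000 MHz gives
	 *
	 *	freq_scale = (1200 << SCHED_CAPACITY_SHIFT) / 2000 ~= 614
	 *
	 * out of SCHED_CAPACITY_SCALE (1024), regardless of the frequency
	 * the hardware actually delivered.
	 */
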
This correction factor can also be obtained using a core counter and a constant counter to get information on the performance (frequency based only) obtained in a period of time. This will more accurately reflect the actual current frequency of the CPU, compared with the alternative implementation that reflects the request of a performance level from the OS. Therefore, implement arch_scale_freq_tick to use activity monitors, if present, for the computation of the frequency scale factor. The use of AMU counters depends on: - CONFIG_ARM64_AMU_EXTN - depents on the AMU extension being present - CONFIG_CPU_FREQ - the current frequency obtained using counter information is divided by the maximum frequency obtained from the cpufreq policy. While it is possible to have a combination of CPUs in the system with and without support for activity monitors, the use of counters for frequency invariance is only enabled for a CPU if all related CPUs (CPUs in the same frequency domain) support and have enabled the core and constant activity monitor counters. In this way, there is a clear separation between the policies for which arch_set_freq_scale (cpufreq based FIE) is used, and the policies for which arch_scale_freq_tick (counter based FIE) is used to set the frequency scale factor. For this purpose, a late_initcall_sync is registered to trigger validation work for policies that will enable or disable the use of AMU counters for frequency invariance. If CONFIG_CPU_FREQ is not defined, the use of counters is enabled on all CPUs only if all possible CPUs correctly support the necessary counters. Signed-off-by: Ionela Voinescu Reviewed-by: Lukasz Luba Acked-by: Sudeep Holla Cc: Sudeep Holla Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/topology.h | 9 ++ arch/arm64/kernel/cpufeature.c | 4 + arch/arm64/kernel/topology.c | 180 ++++++++++++++++++++++++++++++++++++++ drivers/base/arch_topology.c | 12 +++ include/linux/arch_topology.h | 2 + 5 files changed, 207 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index a4d945db95a2..21d4d40d6243 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -16,6 +16,15 @@ int pcibus_to_node(struct pci_bus *bus); #include +#ifdef CONFIG_ARM64_AMU_EXTN +/* + * Replace task scheduler's default counter-based + * frequency-invariance scale factor setting. 
+ */ +void topology_scale_freq_tick(void); +#define arch_scale_freq_tick topology_scale_freq_tick +#endif /* CONFIG_ARM64_AMU_EXTN */ + /* Replace task scheduler's default frequency-invariant accounting */ #define arch_scale_freq_capacity topology_get_freq_scale diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 301538d3a197..bf98a2386534 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1241,12 +1241,16 @@ bool cpu_has_amu_feat(int cpu) return cpumask_test_cpu(cpu, &amu_cpus); } +/* Initialize the use of AMU counters for frequency invariance */ +extern void init_cpu_freq_invariance_counters(void); + static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) { if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); + init_cpu_freq_invariance_counters(); } } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index fa9528dfd0ce..0801a0f3c156 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -120,4 +121,183 @@ int __init parse_acpi_topology(void) } #endif +#ifdef CONFIG_ARM64_AMU_EXTN +#undef pr_fmt +#define pr_fmt(fmt) "AMU: " fmt + +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); +static DEFINE_PER_CPU(u64, arch_const_cycles_prev); +static DEFINE_PER_CPU(u64, arch_core_cycles_prev); +static cpumask_var_t amu_fie_cpus; + +/* Initialize counter reference per-cpu variables for the current CPU */ +void init_cpu_freq_invariance_counters(void) +{ + this_cpu_write(arch_core_cycles_prev, + read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)); + this_cpu_write(arch_const_cycles_prev, + read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)); +} + +static int validate_cpu_freq_invariance_counters(int cpu) +{ + u64 max_freq_hz, ratio; + + if (!cpu_has_amu_feat(cpu)) { + pr_debug("CPU%d: counters are not supported.\n", cpu); + return -EINVAL; + } + + if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) || + !per_cpu(arch_core_cycles_prev, cpu))) { + pr_debug("CPU%d: cycle counters are not enabled.\n", cpu); + return -EINVAL; + } + + /* Convert maximum frequency from KHz to Hz and validate */ + max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000; + if (unlikely(!max_freq_hz)) { + pr_debug("CPU%d: invalid maximum frequency.\n", cpu); + return -EINVAL; + } + + /* + * Pre-compute the fixed ratio between the frequency of the constant + * counter and the maximum frequency of the CPU. + * + * const_freq + * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE² + * cpuinfo_max_freq + * + * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE² + * in order to ensure a good resolution for arch_max_freq_scale for + * very low arch timer frequencies (down to the KHz range which should + * be unlikely). 
+ */ + ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT); + ratio = div64_u64(ratio, max_freq_hz); + if (!ratio) { + WARN_ONCE(1, "System timer frequency too low.\n"); + return -EINVAL; + } + + per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio; + + return 0; +} + +static inline bool +enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + if (!policy) { + pr_debug("CPU%d: No cpufreq policy found.\n", cpu); + return false; + } + + if (cpumask_subset(policy->related_cpus, valid_cpus)) + cpumask_or(amu_fie_cpus, policy->related_cpus, + amu_fie_cpus); + + cpufreq_cpu_put(policy); + + return true; +} + +static DEFINE_STATIC_KEY_FALSE(amu_fie_key); +#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key) + +static int __init init_amu_fie(void) +{ + cpumask_var_t valid_cpus; + bool have_policy = false; + int ret = 0; + int cpu; + + if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL)) + return -ENOMEM; + + if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_valid_mask; + } + + for_each_present_cpu(cpu) { + if (validate_cpu_freq_invariance_counters(cpu)) + continue; + cpumask_set_cpu(cpu, valid_cpus); + have_policy |= enable_policy_freq_counters(cpu, valid_cpus); + } + + /* + * If we are not restricted by cpufreq policies, we only enable + * the use of the AMU feature for FIE if all CPUs support AMU. + * Otherwise, enable_policy_freq_counters has already enabled + * policy cpus. + */ + if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask)) + cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus); + + if (!cpumask_empty(amu_fie_cpus)) { + pr_info("CPUs[%*pbl]: counters will be used for FIE.", + cpumask_pr_args(amu_fie_cpus)); + static_branch_enable(&amu_fie_key); + } + +free_valid_mask: + free_cpumask_var(valid_cpus); + + return ret; +} +late_initcall_sync(init_amu_fie); + +bool arch_freq_counters_available(struct cpumask *cpus) +{ + return amu_freq_invariant() && + cpumask_subset(cpus, amu_fie_cpus); +} + +void topology_scale_freq_tick(void) +{ + u64 prev_core_cnt, prev_const_cnt; + u64 core_cnt, const_cnt, scale; + int cpu = smp_processor_id(); + + if (!amu_freq_invariant()) + return; + + if (!cpumask_test_cpu(cpu, amu_fie_cpus)) + return; + + const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0); + core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0); + prev_const_cnt = this_cpu_read(arch_const_cycles_prev); + prev_core_cnt = this_cpu_read(arch_core_cycles_prev); + + if (unlikely(core_cnt <= prev_core_cnt || + const_cnt <= prev_const_cnt)) + goto store_and_exit; + + /* + * /\core arch_max_freq_scale + * scale = ------- * -------------------- + * /\const SCHED_CAPACITY_SCALE + * + * See validate_cpu_freq_invariance_counters() for details on + * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT. 
+ */ + scale = core_cnt - prev_core_cnt; + scale *= this_cpu_read(arch_max_freq_scale); + scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, + const_cnt - prev_const_cnt); + + scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); + this_cpu_write(freq_scale, (unsigned long)scale); + +store_and_exit: + this_cpu_write(arch_core_cycles_prev, core_cnt); + this_cpu_write(arch_const_cycles_prev, const_cnt); +} +#endif /* CONFIG_ARM64_AMU_EXTN */ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 6119e11a9f95..8d63673c1689 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -21,6 +21,10 @@ #include #include +__weak bool arch_freq_counters_available(struct cpumask *cpus) +{ + return false; +} DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, @@ -29,6 +33,14 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, unsigned long scale; int i; + /* + * If the use of counters for FIE is enabled, just return as we don't + * want to update the scale factor with information from CPUFREQ. + * Instead the scale factor will be updated from arch_scale_freq_tick. + */ + if (arch_freq_counters_available(cpus)) + return; + scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; for_each_cpu(i, cpus) diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 3015ecbb90b1..1ccdddb541a7 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -33,6 +33,8 @@ unsigned long topology_get_freq_scale(int cpu) return per_cpu(freq_scale, cpu); } +bool arch_freq_counters_available(struct cpumask *cpus); + struct cpu_topology { int thread_id; int core_id; -- cgit v1.2.3 From 27afb236fe5adaa3911e47c91057ba783549226f Mon Sep 17 00:00:00 2001 From: 王程刚 Date: Mon, 9 Mar 2020 15:21:42 +0800 Subject: arch/arm64: fix typo in a comment Fix typo in a comment in arch/arm64/include/asm/esr.h "Unallocted" -> "Unallocated" Signed-off-by: Chenggang Wang Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index cb29253ae86b..6a395a7e6707 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -60,7 +60,7 @@ #define ESR_ELx_EC_BKPT32 (0x38) /* Unallocated EC: 0x39 */ #define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ -/* Unallocted EC: 0x3B */ +/* Unallocated EC: 0x3B */ #define ESR_ELx_EC_BRK64 (0x3C) /* Unallocated EC: 0x3D - 0x3F */ #define ESR_ELx_EC_MAX (0x3F) -- cgit v1.2.3 From 2ca86c346277a121ec0194d74d1ab7d984f756e7 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:25 +0000 Subject: arm64: crypto: Modernize some extra assembly annotations A couple of functions were missed in the modernisation of assembly macros, update them too. 
Signed-off-by: Mark Brown Reviewed-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/crypto/ghash-ce-core.S | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index 084c6a30b03a..6b958dcdf136 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S @@ -587,20 +587,20 @@ CPU_LE( rev w8, w8 ) * struct ghash_key const *k, u64 dg[], u8 ctr[], * int rounds, u8 tag) */ -ENTRY(pmull_gcm_encrypt) +SYM_FUNC_START(pmull_gcm_encrypt) pmull_gcm_do_crypt 1 -ENDPROC(pmull_gcm_encrypt) +SYM_FUNC_END(pmull_gcm_encrypt) /* * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[], * struct ghash_key const *k, u64 dg[], u8 ctr[], * int rounds, u8 tag) */ -ENTRY(pmull_gcm_decrypt) +SYM_FUNC_START(pmull_gcm_decrypt) pmull_gcm_do_crypt 0 -ENDPROC(pmull_gcm_decrypt) +SYM_FUNC_END(pmull_gcm_decrypt) -pmull_gcm_ghash_4x: +SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x) movi MASK.16b, #0xe1 shl MASK.2d, MASK.2d, #57 @@ -681,9 +681,9 @@ pmull_gcm_ghash_4x: eor XL.16b, XL.16b, T2.16b ret -ENDPROC(pmull_gcm_ghash_4x) +SYM_FUNC_END(pmull_gcm_ghash_4x) -pmull_gcm_enc_4x: +SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x) ld1 {KS0.16b}, [x5] // load upper counter sub w10, w8, #4 sub w11, w8, #3 @@ -746,7 +746,7 @@ pmull_gcm_enc_4x: eor INP3.16b, INP3.16b, KS3.16b ret -ENDPROC(pmull_gcm_enc_4x) +SYM_FUNC_END(pmull_gcm_enc_4x) .section ".rodata", "a" .align 6 -- cgit v1.2.3 From b8e505484e376322cb1e12540e8b52dc31b73b6e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:26 +0000 Subject: arm64: crypto: Modernize names for AES function macros Now that the rest of the code has been converted to the modern START/END macros the AES_ENTRY() and AES_ENDPROC() macros look out of place and like they need updating. Rename them to AES_FUNC_START() and AES_FUNC_END() to line up with the modern style assembly macros. Signed-off-by: Mark Brown Reviewed-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/crypto/aes-ce.S | 4 ++-- arch/arm64/crypto/aes-modes.S | 48 +++++++++++++++++++++---------------------- arch/arm64/crypto/aes-neon.S | 4 ++-- 3 files changed, 28 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index 45062553467f..1dc5bbbfeed2 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S @@ -9,8 +9,8 @@ #include #include -#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func) -#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func) +#define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func) +#define AES_FUNC_END(func) SYM_FUNC_END(ce_ ## func) .arch armv8-a+crypto diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 8a2faa42b57e..cf618d8f6cec 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x) * int blocks) */ -AES_ENTRY(aes_ecb_encrypt) +AES_FUNC_START(aes_ecb_encrypt) stp x29, x30, [sp, #-16]! mov x29, sp @@ -79,10 +79,10 @@ ST5( st1 {v4.16b}, [x0], #16 ) .Lecbencout: ldp x29, x30, [sp], #16 ret -AES_ENDPROC(aes_ecb_encrypt) +AES_FUNC_END(aes_ecb_encrypt) -AES_ENTRY(aes_ecb_decrypt) +AES_FUNC_START(aes_ecb_decrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp @@ -110,7 +110,7 @@ ST5( st1 {v4.16b}, [x0], #16 ) .Lecbdecout: ldp x29, x30, [sp], #16 ret -AES_ENDPROC(aes_ecb_decrypt) +AES_FUNC_END(aes_ecb_decrypt) /* @@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt) * u32 const rk2[]); */ -AES_ENTRY(aes_essiv_cbc_encrypt) +AES_FUNC_START(aes_essiv_cbc_encrypt) ld1 {v4.16b}, [x5] /* get iv */ mov w8, #14 /* AES-256: 14 rounds */ @@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt) enc_switch_key w3, x2, x6 b .Lcbcencloop4x -AES_ENTRY(aes_cbc_encrypt) +AES_FUNC_START(aes_cbc_encrypt) ld1 {v4.16b}, [x5] /* get iv */ enc_prepare w3, x2, x6 @@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt) .Lcbcencout: st1 {v4.16b}, [x5] /* return iv */ ret -AES_ENDPROC(aes_cbc_encrypt) -AES_ENDPROC(aes_essiv_cbc_encrypt) +AES_FUNC_END(aes_cbc_encrypt) +AES_FUNC_END(aes_essiv_cbc_encrypt) -AES_ENTRY(aes_essiv_cbc_decrypt) +AES_FUNC_START(aes_essiv_cbc_decrypt) stp x29, x30, [sp, #-16]! mov x29, sp @@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt) encrypt_block cbciv, w8, x6, x7, w9 b .Lessivcbcdecstart -AES_ENTRY(aes_cbc_decrypt) +AES_FUNC_START(aes_cbc_decrypt) stp x29, x30, [sp, #-16]! mov x29, sp @@ -238,8 +238,8 @@ ST5( st1 {v4.16b}, [x0], #16 ) st1 {cbciv.16b}, [x5] /* return iv */ ldp x29, x30, [sp], #16 ret -AES_ENDPROC(aes_cbc_decrypt) -AES_ENDPROC(aes_essiv_cbc_decrypt) +AES_FUNC_END(aes_cbc_decrypt) +AES_FUNC_END(aes_essiv_cbc_decrypt) /* @@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt) * int rounds, int bytes, u8 const iv[]) */ -AES_ENTRY(aes_cbc_cts_encrypt) +AES_FUNC_START(aes_cbc_cts_encrypt) adr_l x8, .Lcts_permute_table sub x4, x4, #16 add x9, x8, #32 @@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt) st1 {v0.16b}, [x4] /* overlapping stores */ st1 {v1.16b}, [x0] ret -AES_ENDPROC(aes_cbc_cts_encrypt) +AES_FUNC_END(aes_cbc_cts_encrypt) -AES_ENTRY(aes_cbc_cts_decrypt) +AES_FUNC_START(aes_cbc_cts_decrypt) adr_l x8, .Lcts_permute_table sub x4, x4, #16 add x9, x8, #32 @@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt) st1 {v2.16b}, [x4] /* overlapping stores */ st1 {v0.16b}, [x0] ret -AES_ENDPROC(aes_cbc_cts_decrypt) +AES_FUNC_END(aes_cbc_cts_decrypt) .section ".rodata", "a" .align 6 @@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt) * int blocks, u8 ctr[]) */ -AES_ENTRY(aes_ctr_encrypt) +AES_FUNC_START(aes_ctr_encrypt) stp x29, x30, [sp, #-16]! mov x29, sp @@ -409,7 +409,7 @@ ST5( st1 {v4.16b}, [x0], #16 ) rev x7, x7 ins vctr.d[0], x7 b .Lctrcarrydone -AES_ENDPROC(aes_ctr_encrypt) +AES_FUNC_END(aes_ctr_encrypt) /* @@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt) uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s .endm -AES_ENTRY(aes_xts_encrypt) +AES_FUNC_START(aes_xts_encrypt) stp x29, x30, [sp, #-16]! mov x29, sp @@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt) st1 {v2.16b}, [x4] /* overlapping stores */ mov w4, wzr b .Lxtsencctsout -AES_ENDPROC(aes_xts_encrypt) +AES_FUNC_END(aes_xts_encrypt) -AES_ENTRY(aes_xts_decrypt) +AES_FUNC_START(aes_xts_decrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp @@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt) st1 {v2.16b}, [x4] /* overlapping stores */ mov w4, wzr b .Lxtsdecctsout -AES_ENDPROC(aes_xts_decrypt) +AES_FUNC_END(aes_xts_decrypt) /* * aes_mac_update(u8 const in[], u32 const rk[], int rounds, * int blocks, u8 dg[], int enc_before, int enc_after) */ -AES_ENTRY(aes_mac_update) +AES_FUNC_START(aes_mac_update) frame_push 6 mov x19, x0 @@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update) ld1 {v0.16b}, [x23] /* get dg */ enc_prepare w21, x20, x0 b .Lmacloop4x -AES_ENDPROC(aes_mac_update) +AES_FUNC_END(aes_mac_update) diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S index 247d34ddaab0..e47d3ec2cfb4 100644 --- a/arch/arm64/crypto/aes-neon.S +++ b/arch/arm64/crypto/aes-neon.S @@ -8,8 +8,8 @@ #include #include -#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func) -#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func) +#define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func) +#define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func) xtsmask .req v7 cbciv .req v7 -- cgit v1.2.3 From 0ccbd98a92c19a34921adb082cd00c58e134ba02 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:27 +0000 Subject: arm64: entry: Annotate vector table and handlers as code In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. The vector table and handlers aren't normal C style code so should be annotated as CODE. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 76 +++++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9461d812ae27..1454f3ea2e2e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -465,7 +465,7 @@ alternative_endif .pushsection ".entry.text", "ax" .align 11 -ENTRY(vectors) +SYM_CODE_START(vectors) kernel_ventry 1, sync_invalid // Synchronous EL1t kernel_ventry 1, irq_invalid // IRQ EL1t kernel_ventry 1, fiq_invalid // FIQ EL1t @@ -492,7 +492,7 @@ ENTRY(vectors) kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif -END(vectors) +SYM_CODE_END(vectors) #ifdef CONFIG_VMAP_STACK /* @@ -534,57 +534,57 @@ __bad_stack: ASM_BUG() .endm -el0_sync_invalid: +SYM_CODE_START_LOCAL(el0_sync_invalid) inv_entry 0, BAD_SYNC -ENDPROC(el0_sync_invalid) +SYM_CODE_END(el0_sync_invalid) -el0_irq_invalid: +SYM_CODE_START_LOCAL(el0_irq_invalid) inv_entry 0, BAD_IRQ -ENDPROC(el0_irq_invalid) +SYM_CODE_END(el0_irq_invalid) -el0_fiq_invalid: +SYM_CODE_START_LOCAL(el0_fiq_invalid) inv_entry 0, BAD_FIQ -ENDPROC(el0_fiq_invalid) +SYM_CODE_END(el0_fiq_invalid) -el0_error_invalid: +SYM_CODE_START_LOCAL(el0_error_invalid) inv_entry 0, BAD_ERROR -ENDPROC(el0_error_invalid) +SYM_CODE_END(el0_error_invalid) #ifdef CONFIG_COMPAT -el0_fiq_invalid_compat: +SYM_CODE_START_LOCAL(el0_fiq_invalid_compat) inv_entry 0, BAD_FIQ, 32 -ENDPROC(el0_fiq_invalid_compat) +SYM_CODE_END(el0_fiq_invalid_compat) #endif -el1_sync_invalid: +SYM_CODE_START_LOCAL(el1_sync_invalid) inv_entry 1, BAD_SYNC -ENDPROC(el1_sync_invalid) +SYM_CODE_END(el1_sync_invalid) -el1_irq_invalid: +SYM_CODE_START_LOCAL(el1_irq_invalid) inv_entry 1, BAD_IRQ -ENDPROC(el1_irq_invalid) +SYM_CODE_END(el1_irq_invalid) -el1_fiq_invalid: 
+SYM_CODE_START_LOCAL(el1_fiq_invalid) inv_entry 1, BAD_FIQ -ENDPROC(el1_fiq_invalid) +SYM_CODE_END(el1_fiq_invalid) -el1_error_invalid: +SYM_CODE_START_LOCAL(el1_error_invalid) inv_entry 1, BAD_ERROR -ENDPROC(el1_error_invalid) +SYM_CODE_END(el1_error_invalid) /* * EL1 mode handlers. */ .align 6 -el1_sync: +SYM_CODE_START_LOCAL_NOALIGN(el1_sync) kernel_entry 1 mov x0, sp bl el1_sync_handler kernel_exit 1 -ENDPROC(el1_sync) +SYM_CODE_END(el1_sync) .align 6 -el1_irq: +SYM_CODE_START_LOCAL_NOALIGN(el1_irq) kernel_entry 1 gic_prio_irq_setup pmr=x20, tmp=x1 enable_da_f @@ -639,42 +639,42 @@ alternative_else_nop_endif #endif kernel_exit 1 -ENDPROC(el1_irq) +SYM_CODE_END(el1_irq) /* * EL0 mode handlers. */ .align 6 -el0_sync: +SYM_CODE_START_LOCAL_NOALIGN(el0_sync) kernel_entry 0 mov x0, sp bl el0_sync_handler b ret_to_user -ENDPROC(el0_sync) +SYM_CODE_END(el0_sync) #ifdef CONFIG_COMPAT .align 6 -el0_sync_compat: +SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat) kernel_entry 0, 32 mov x0, sp bl el0_sync_compat_handler b ret_to_user -ENDPROC(el0_sync_compat) +SYM_CODE_END(el0_sync_compat) .align 6 -el0_irq_compat: +SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) kernel_entry 0, 32 b el0_irq_naked -ENDPROC(el0_irq_compat) +SYM_CODE_END(el0_irq_compat) -el0_error_compat: +SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) kernel_entry 0, 32 b el0_error_naked -ENDPROC(el0_error_compat) +SYM_CODE_END(el0_error_compat) #endif .align 6 -el0_irq: +SYM_CODE_START_LOCAL_NOALIGN(el0_irq) kernel_entry 0 el0_irq_naked: gic_prio_irq_setup pmr=x20, tmp=x0 @@ -696,9 +696,9 @@ el0_irq_naked: bl trace_hardirqs_on #endif b ret_to_user -ENDPROC(el0_irq) +SYM_CODE_END(el0_irq) -el1_error: +SYM_CODE_START_LOCAL(el1_error) kernel_entry 1 mrs x1, esr_el1 gic_prio_kentry_setup tmp=x2 @@ -706,9 +706,9 @@ el1_error: mov x0, sp bl do_serror kernel_exit 1 -ENDPROC(el1_error) +SYM_CODE_END(el1_error) -el0_error: +SYM_CODE_START_LOCAL(el0_error) kernel_entry 0 el0_error_naked: mrs x25, esr_el1 @@ -720,7 +720,7 @@ el0_error_naked: bl do_serror enable_da_f b ret_to_user -ENDPROC(el0_error) +SYM_CODE_END(el0_error) /* * Ok, we need to do extra processing, enter the slow path. -- cgit v1.2.3 From c3357fc5415d6e0c45c7c8987e06cf8f8b3f3a54 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:28 +0000 Subject: arm64: entry: Annotate ret_from_fork as code In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. ret_from_fork is not a normal C function and should therefore be annotated as code. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 1454f3ea2e2e..d535cb8a7413 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -902,14 +902,14 @@ NOKPROBE(cpu_switch_to) /* * This is how we return from a fork. 
*/ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) bl schedule_tail cbz x19, 1f // not a kernel thread mov x0, x20 blr x19 1: get_current_task tsk b ret_to_user -ENDPROC(ret_from_fork) +SYM_CODE_END(ret_from_fork) NOKPROBE(ret_from_fork) #ifdef CONFIG_ARM_SDE_INTERFACE -- cgit v1.2.3 From e7bf6972177376496bf0748d9a99faa72960fb0b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:29 +0000 Subject: arm64: entry: Additional annotation conversions for entry.S In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC with separate annotations for standard C callable functions, data and code with different calling conventions. Update the remaining annotations in the entry.S code to the new macros. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index d535cb8a7413..fbf69fe94412 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -832,7 +832,7 @@ alternative_else_nop_endif .endm .align 11 -ENTRY(tramp_vectors) +SYM_CODE_START_NOALIGN(tramp_vectors) .space 0x400 tramp_ventry @@ -844,15 +844,15 @@ ENTRY(tramp_vectors) tramp_ventry 32 tramp_ventry 32 tramp_ventry 32 -END(tramp_vectors) +SYM_CODE_END(tramp_vectors) -ENTRY(tramp_exit_native) +SYM_CODE_START(tramp_exit_native) tramp_exit -END(tramp_exit_native) +SYM_CODE_END(tramp_exit_native) -ENTRY(tramp_exit_compat) +SYM_CODE_START(tramp_exit_compat) tramp_exit 32 -END(tramp_exit_compat) +SYM_CODE_END(tramp_exit_compat) .ltorg .popsection // .entry.tramp.text @@ -874,7 +874,7 @@ __entry_tramp_data_start: * Previous and next are guaranteed not to be the same. * */ -ENTRY(cpu_switch_to) +SYM_FUNC_START(cpu_switch_to) mov x10, #THREAD_CPU_CONTEXT add x8, x0, x10 mov x9, sp @@ -896,7 +896,7 @@ ENTRY(cpu_switch_to) mov sp, x9 msr sp_el0, x1 ret -ENDPROC(cpu_switch_to) +SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) /* -- cgit v1.2.3 From e2d591d29d44af2078336c5dd7ab503c151a2607 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:30 +0000 Subject: arm64: entry-ftrace.S: Convert to modern annotations for assembly functions In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions which previously had no ENTRY equivalent. Update the annotations in the core kernel code to the new macros. 
Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry-ftrace.S | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 7d02f9966d34..3d32b6d325d7 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -91,11 +91,11 @@ ENTRY(ftrace_common) ldr_l x2, function_trace_op // op mov x3, sp // regs -GLOBAL(ftrace_call) +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) bl ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER -GLOBAL(ftrace_graph_call) // ftrace_graph_caller(); +SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller(); nop // If enabled, this will be replaced // "b ftrace_graph_caller" #endif @@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller) * - tracer function to probe instrumented function's entry, * - ftrace_graph_caller to set up an exit hook */ -ENTRY(_mcount) +SYM_FUNC_START(_mcount) mcount_enter ldr_l x2, ftrace_trace_function @@ -242,7 +242,7 @@ skip_ftrace_call: // } b.ne ftrace_graph_caller // ftrace_graph_caller(); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ mcount_exit -ENDPROC(_mcount) +SYM_FUNC_END(_mcount) EXPORT_SYMBOL(_mcount) NOKPROBE(_mcount) @@ -253,9 +253,9 @@ NOKPROBE(_mcount) * and later on, NOP to branch to ftrace_caller() when enabled or branch to * NOP when disabled per-function base. */ -ENTRY(_mcount) +SYM_FUNC_START(_mcount) ret -ENDPROC(_mcount) +SYM_FUNC_END(_mcount) EXPORT_SYMBOL(_mcount) NOKPROBE(_mcount) @@ -268,24 +268,24 @@ NOKPROBE(_mcount) * - tracer function to probe instrumented function's entry, * - ftrace_graph_caller to set up an exit hook */ -ENTRY(ftrace_caller) +SYM_FUNC_START(ftrace_caller) mcount_enter mcount_get_pc0 x0 // function's pc mcount_get_lr x1 // function's lr -GLOBAL(ftrace_call) // tracer(pc, lr); +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr); nop // This will be replaced with "bl xxx" // where xxx can be any kind of tracer. #ifdef CONFIG_FUNCTION_GRAPH_TRACER -GLOBAL(ftrace_graph_call) // ftrace_graph_caller(); +SYM_INNER_LABEL(ftrace_graph_call) // ftrace_graph_caller(); nop // If enabled, this will be replaced // "b ftrace_graph_caller" #endif mcount_exit -ENDPROC(ftrace_caller) +SYM_FUNC_END(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -298,20 +298,20 @@ ENDPROC(ftrace_caller) * the call stack in order to intercept instrumented function's return path * and run return_to_handler() later on its exit. */ -ENTRY(ftrace_graph_caller) +SYM_FUNC_START(ftrace_graph_caller) mcount_get_pc x0 // function's pc mcount_get_lr_addr x1 // pointer to function's saved lr mcount_get_parent_fp x2 // parent's fp bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp) mcount_exit -ENDPROC(ftrace_graph_caller) +SYM_FUNC_END(ftrace_graph_caller) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ -ENTRY(ftrace_stub) +SYM_FUNC_START(ftrace_stub) ret -ENDPROC(ftrace_stub) +SYM_FUNC_END(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* -- cgit v1.2.3 From e434b08b442b63b431de2737a99885e07efd1b0e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:31 +0000 Subject: arm64: ftrace: Correct annotation of ftrace_caller assembly In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. 
These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. The patchable function entry versions of ftrace_*_caller don't follow the usual AAPCS rules, pushing things onto the stack which they don't clean up, and therefore should be annotated as code rather than functions. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry-ftrace.S | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 3d32b6d325d7..baf5a20a5566 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -75,17 +75,17 @@ add x29, sp, #S_STACKFRAME .endm -ENTRY(ftrace_regs_caller) +SYM_CODE_START(ftrace_regs_caller) ftrace_regs_entry 1 b ftrace_common -ENDPROC(ftrace_regs_caller) +SYM_CODE_END(ftrace_regs_caller) -ENTRY(ftrace_caller) +SYM_CODE_START(ftrace_caller) ftrace_regs_entry 0 b ftrace_common -ENDPROC(ftrace_caller) +SYM_CODE_END(ftrace_caller) -ENTRY(ftrace_common) +SYM_CODE_START(ftrace_common) sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) mov x1, x9 // parent_ip (callsite's LR) ldr_l x2, function_trace_op // op @@ -122,17 +122,17 @@ ftrace_common_return: add sp, sp, #S_FRAME_SIZE + 16 ret x9 -ENDPROC(ftrace_common) +SYM_CODE_END(ftrace_common) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) +SYM_CODE_START(ftrace_graph_caller) ldr x0, [sp, #S_PC] sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn) add x1, sp, #S_LR // parent_ip (callsite's LR) ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP) bl prepare_ftrace_return b ftrace_common_return -ENDPROC(ftrace_graph_caller) +SYM_CODE_END(ftrace_graph_caller) #endif #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ -- cgit v1.2.3 From 1e4729ed028d1e70508926a58363ee85243fdaaa Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:32 +0000 Subject: arm64: ftrace: Modernise annotation of return_to_handler In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. return_to_handler does entertaining things with LR so doesn't follow the usual C conventions and should therefore be annotated as code rather than a function. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry-ftrace.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index baf5a20a5566..820101821ac4 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -320,7 +320,7 @@ SYM_FUNC_END(ftrace_stub) * Run ftrace_return_to_handler() before going back to parent. * @fp is checked against the value passed by ftrace_graph_caller(). 
*/ -ENTRY(return_to_handler) +SYM_CODE_START(return_to_handler) /* save return value regs */ sub sp, sp, #64 stp x0, x1, [sp] @@ -340,5 +340,5 @@ ENTRY(return_to_handler) add sp, sp, #64 ret -END(return_to_handler) +SYM_CODE_END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- cgit v1.2.3 From c63d9f82db94399022a193cdfd57dbafa2a871cb Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:33 +0000 Subject: arm64: head.S: Convert to modern annotations for assembly functions In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions which previously had no ENTRY equivalent. Update the annotations in the core kernel code to the new macros. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 56 ++++++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..716c946c98e9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -275,7 +275,7 @@ ENDPROC(preserve_boot_args) * - first few MB of the kernel linear mapping to jump to once the MMU has * been enabled */ -__create_page_tables: +SYM_FUNC_START_LOCAL(__create_page_tables) mov x28, lr /* @@ -403,7 +403,7 @@ __create_page_tables: bl __inval_dcache_area ret x28 -ENDPROC(__create_page_tables) +SYM_FUNC_END(__create_page_tables) .ltorg /* @@ -411,7 +411,7 @@ ENDPROC(__create_page_tables) * * x0 = __PHYS_OFFSET */ -__primary_switched: +SYM_FUNC_START_LOCAL(__primary_switched) adrp x4, init_thread_union add sp, x4, #THREAD_SIZE adr_l x5, init_task @@ -456,7 +456,7 @@ __primary_switched: mov x29, #0 mov x30, #0 b start_kernel -ENDPROC(__primary_switched) +SYM_FUNC_END(__primary_switched) /* * end early head section, begin head code that is also used for @@ -475,7 +475,7 @@ EXPORT_SYMBOL(kimage_vaddr) * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if * booted in EL1 or EL2 respectively. */ -ENTRY(el2_setup) +SYM_FUNC_START(el2_setup) msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 @@ -636,13 +636,13 @@ install_el2_stub: msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 eret -ENDPROC(el2_setup) +SYM_FUNC_END(el2_setup) /* * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed * in w0. See arch/arm64/include/asm/virt.h for more info. */ -set_cpu_boot_mode_flag: +SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) adr_l x1, __boot_cpu_mode cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f @@ -651,7 +651,7 @@ set_cpu_boot_mode_flag: dmb sy dc ivac, x1 // Invalidate potentially stale cache line ret -ENDPROC(set_cpu_boot_mode_flag) +SYM_FUNC_END(set_cpu_boot_mode_flag) /* * These values are written with the MMU off, but read with the MMU on. @@ -683,7 +683,7 @@ ENTRY(__early_cpu_boot_status) * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. */ -ENTRY(secondary_holding_pen) +SYM_FUNC_START(secondary_holding_pen) bl el2_setup // Drop to EL1, w0=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 @@ -695,19 +695,19 @@ pen: ldr x4, [x3] b.eq secondary_startup wfe b pen -ENDPROC(secondary_holding_pen) +SYM_FUNC_END(secondary_holding_pen) /* * Secondary entry point that jumps straight into the kernel. 
Only to * be used where CPUs are brought online dynamically by the kernel. */ -ENTRY(secondary_entry) +SYM_FUNC_START(secondary_entry) bl el2_setup // Drop to EL1 bl set_cpu_boot_mode_flag b secondary_startup -ENDPROC(secondary_entry) +SYM_FUNC_END(secondary_entry) -secondary_startup: +SYM_FUNC_START_LOCAL(secondary_startup) /* * Common entry point for secondary CPUs. */ @@ -717,9 +717,9 @@ secondary_startup: bl __enable_mmu ldr x8, =__secondary_switched br x8 -ENDPROC(secondary_startup) +SYM_FUNC_END(secondary_startup) -__secondary_switched: +SYM_FUNC_START_LOCAL(__secondary_switched) adr_l x5, vectors msr vbar_el1, x5 isb @@ -734,13 +734,13 @@ __secondary_switched: mov x29, #0 mov x30, #0 b secondary_start_kernel -ENDPROC(__secondary_switched) +SYM_FUNC_END(__secondary_switched) -__secondary_too_slow: +SYM_FUNC_START_LOCAL(__secondary_too_slow) wfe wfi b __secondary_too_slow -ENDPROC(__secondary_too_slow) +SYM_FUNC_END(__secondary_too_slow) /* * The booting CPU updates the failed status @__early_cpu_boot_status, @@ -772,7 +772,7 @@ ENDPROC(__secondary_too_slow) * Checks if the selected granule size is supported by the CPU. * If it isn't, park the CPU */ -ENTRY(__enable_mmu) +SYM_FUNC_START(__enable_mmu) mrs x2, ID_AA64MMFR0_EL1 ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED @@ -796,9 +796,9 @@ ENTRY(__enable_mmu) dsb nsh isb ret -ENDPROC(__enable_mmu) +SYM_FUNC_END(__enable_mmu) -ENTRY(__cpu_secondary_check52bitva) +SYM_FUNC_START(__cpu_secondary_check52bitva) #ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x0, vabits_actual cmp x0, #52 @@ -816,9 +816,9 @@ ENTRY(__cpu_secondary_check52bitva) #endif 2: ret -ENDPROC(__cpu_secondary_check52bitva) +SYM_FUNC_END(__cpu_secondary_check52bitva) -__no_granule_support: +SYM_FUNC_START_LOCAL(__no_granule_support) /* Indicate that this CPU can't boot and is stuck in the kernel */ update_early_cpu_boot_status \ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2 @@ -826,10 +826,10 @@ __no_granule_support: wfe wfi b 1b -ENDPROC(__no_granule_support) +SYM_FUNC_END(__no_granule_support) #ifdef CONFIG_RELOCATABLE -__relocate_kernel: +SYM_FUNC_START_LOCAL(__relocate_kernel) /* * Iterate over each entry in the relocation table, and apply the * relocations in place. @@ -931,10 +931,10 @@ __relocate_kernel: #endif ret -ENDPROC(__relocate_kernel) +SYM_FUNC_END(__relocate_kernel) #endif -__primary_switch: +SYM_FUNC_START_LOCAL(__primary_switch) #ifdef CONFIG_RANDOMIZE_BASE mov x19, x0 // preserve new SCTLR_EL1 value mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value @@ -977,4 +977,4 @@ __primary_switch: ldr x8, =__primary_switched adrp x0, __PHYS_OFFSET br x8 -ENDPROC(__primary_switch) +SYM_FUNC_END(__primary_switch) -- cgit v1.2.3 From ebdf44a189984d6f72db6427194d4f5a6b07dc17 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:34 +0000 Subject: arm64: head: Annotate stext and preserve_boot_args as code In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. Neither stext nor preserve_boot_args is called with the usual AAPCS calling conventions and they should therefore be annotated as code. 
Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 716c946c98e9..c334863991e7 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -105,7 +105,7 @@ pe_header: * x24 __primary_switch() .. relocate_kernel() * current RELR displacement */ -ENTRY(stext) +SYM_CODE_START(stext) bl preserve_boot_args bl el2_setup // Drop to EL1, w0=cpu_boot_mode adrp x23, __PHYS_OFFSET @@ -120,12 +120,12 @@ ENTRY(stext) */ bl __cpu_setup // initialise processor b __primary_switch -ENDPROC(stext) +SYM_CODE_END(stext) /* * Preserve the arguments passed by the bootloader in x0 .. x3 */ -preserve_boot_args: +SYM_CODE_START_LOCAL(preserve_boot_args) mov x21, x0 // x21=FDT adr_l x0, boot_args // record the contents of @@ -137,7 +137,7 @@ preserve_boot_args: mov x1, #0x20 // 4 x 8 bytes b __inval_dcache_area // tail call -ENDPROC(preserve_boot_args) +SYM_CODE_END(preserve_boot_args) /* * Macro to create a table entry to the next page. -- cgit v1.2.3 From a5d4420b26b5068a98ce75210b07d2e083f5f691 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:35 +0000 Subject: arm64: kernel: Convert to modern annotations for assembly data In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These include specific annotations for the start and end of data, update symbols for data to use these. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 7 ++++--- arch/arm64/kernel/head.S | 9 ++++++--- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index fbf69fe94412..7439f29946fb 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -859,9 +859,9 @@ SYM_CODE_END(tramp_exit_compat) #ifdef CONFIG_RANDOMIZE_BASE .pushsection ".rodata", "a" .align PAGE_SHIFT - .globl __entry_tramp_data_start -__entry_tramp_data_start: +SYM_DATA_START(__entry_tramp_data_start) .quad vectors +SYM_DATA_END(__entry_tramp_data_start) .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ @@ -983,8 +983,9 @@ NOKPROBE(__sdei_asm_exit_trampoline) .popsection // .entry.tramp.text #ifdef CONFIG_RANDOMIZE_BASE .pushsection ".rodata", "a" -__sdei_asm_trampoline_next_handler: +SYM_DATA_START(__sdei_asm_trampoline_next_handler) .quad __sdei_asm_handler +SYM_DATA_END(__sdei_asm_trampoline_next_handler) .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c334863991e7..a06727354fad 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -464,8 +464,9 @@ SYM_FUNC_END(__primary_switched) */ .section ".idmap.text","awx" -ENTRY(kimage_vaddr) +SYM_DATA_START(kimage_vaddr) .quad _text - TEXT_OFFSET +SYM_DATA_END(kimage_vaddr) EXPORT_SYMBOL(kimage_vaddr) /* @@ -667,15 +668,17 @@ SYM_FUNC_END(set_cpu_boot_mode_flag) * This is not in .bss, because we set it sufficiently early that the boot-time * zeroing of .bss would clobber it. */ -ENTRY(__boot_cpu_mode) +SYM_DATA_START(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 .long BOOT_CPU_MODE_EL1 +SYM_DATA_END(__boot_cpu_mode) /* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. 
*/ -ENTRY(__early_cpu_boot_status) +SYM_DATA_START(__early_cpu_boot_status) .quad 0 +SYM_DATA_END(__early_cpu_boot_status) .popsection -- cgit v1.2.3 From 617a2f392c92dbdeed21817d3d7e1a1e85719550 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:37 +0000 Subject: arm64: kvm: Annotate assembly using modern annoations In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC with separate annotations for standard C callable functions, data and code with different calling conventions. Update the more straightforward annotations in the kvm code to the new macros. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas Acked-by: Marc Zyngier --- arch/arm64/kvm/hyp-init.S | 8 ++++---- arch/arm64/kvm/hyp.S | 4 ++-- arch/arm64/kvm/hyp/fpsimd.S | 8 ++++---- arch/arm64/kvm/hyp/hyp-entry.S | 15 ++++++++------- 4 files changed, 18 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 160be2b4696d..84f32cf5abc7 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -18,7 +18,7 @@ .align 11 -ENTRY(__kvm_hyp_init) +SYM_CODE_START(__kvm_hyp_init) ventry __invalid // Synchronous EL2t ventry __invalid // IRQ EL2t ventry __invalid // FIQ EL2t @@ -117,9 +117,9 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE) /* Hello, World! */ eret -ENDPROC(__kvm_hyp_init) +SYM_CODE_END(__kvm_hyp_init) -ENTRY(__kvm_handle_stub_hvc) +SYM_CODE_START(__kvm_handle_stub_hvc) cmp x0, #HVC_SOFT_RESTART b.ne 1f @@ -158,7 +158,7 @@ reset: ldr x0, =HVC_STUB_ERR eret -ENDPROC(__kvm_handle_stub_hvc) +SYM_CODE_END(__kvm_handle_stub_hvc) .ltorg diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index c0094d520dff..3c79a1124af2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -28,7 +28,7 @@ * and is used to implement hyp stubs in the same way as in * arch/arm64/kernel/hyp_stub.S. 
*/ -ENTRY(__kvm_call_hyp) +SYM_FUNC_START(__kvm_call_hyp) hvc #0 ret -ENDPROC(__kvm_call_hyp) +SYM_FUNC_END(__kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S index 78ff53225691..5b8ff517ff10 100644 --- a/arch/arm64/kvm/hyp/fpsimd.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -11,12 +11,12 @@ .text .pushsection .hyp.text, "ax" -ENTRY(__fpsimd_save_state) +SYM_FUNC_START(__fpsimd_save_state) fpsimd_save x0, 1 ret -ENDPROC(__fpsimd_save_state) +SYM_FUNC_END(__fpsimd_save_state) -ENTRY(__fpsimd_restore_state) +SYM_FUNC_START(__fpsimd_restore_state) fpsimd_restore x0, 1 ret -ENDPROC(__fpsimd_restore_state) +SYM_FUNC_END(__fpsimd_restore_state) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index ffa68d5713f1..0aea8f9ab23d 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -180,7 +180,7 @@ el2_error: eret sb -ENTRY(__hyp_do_panic) +SYM_FUNC_START(__hyp_do_panic) mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr @@ -188,18 +188,19 @@ ENTRY(__hyp_do_panic) msr elr_el2, lr eret sb -ENDPROC(__hyp_do_panic) +SYM_FUNC_END(__hyp_do_panic) -ENTRY(__hyp_panic) +SYM_CODE_START(__hyp_panic) get_host_ctxt x0, x1 b hyp_panic -ENDPROC(__hyp_panic) +SYM_CODE_END(__hyp_panic) .macro invalid_vector label, target = __hyp_panic .align 2 +SYM_CODE_START(\label) \label: b \target -ENDPROC(\label) +SYM_CODE_END(\label) .endm /* None of these should ever happen */ @@ -246,7 +247,7 @@ check_preamble_length 661b, 662b check_preamble_length 661b, 662b .endm -ENTRY(__kvm_hyp_vector) +SYM_CODE_START(__kvm_hyp_vector) invalid_vect el2t_sync_invalid // Synchronous EL2t invalid_vect el2t_irq_invalid // IRQ EL2t invalid_vect el2t_fiq_invalid // FIQ EL2t @@ -266,7 +267,7 @@ ENTRY(__kvm_hyp_vector) valid_vect el1_irq // IRQ 32-bit EL1 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1 valid_vect el1_error // Error 32-bit EL1 -ENDPROC(__kvm_hyp_vector) +SYM_CODE_END(__kvm_hyp_vector) #ifdef CONFIG_KVM_INDIRECT_VECTORS .macro hyp_ventry -- cgit v1.2.3 From 6e52aab9015277c39c6f03a76a7e9487370013f8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:38 +0000 Subject: arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs We have recently introduced new macros for annotating assembly symbols for things that aren't C functions, SYM_CODE_START() and SYM_CODE_END(), in an effort to clarify and simplify our annotations of assembly files. Using these for __bp_harden_hyp_vecs is more involved than for most symbols as this symbol is annotated quite unusually as rather than just have the explicit symbol we define _start and _end symbols which we then use to compute the length. This does not play at all nicely with the new style macros. Since the size of the vectors is a known constant which won't vary the simplest thing to do is simply to drop the separate _start and _end symbols and just use a #define for the size. 
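To make the shape of the change concrete, a simplified C-level sketch of the
before and after (illustration only; the real definitions are in the diff
below, and SZ_2K is the usual kernel size constant):

	/* Before: size derived from a pair of start/end symbols. */
	extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
	/* size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start */

	/* After: a single symbol plus a compile-time constant size. */
	#define BP_HARDEN_EL2_SLOTS	4
	#define __BP_HARDEN_HYP_VECS_SZ	(BP_HARDEN_EL2_SLOTS * SZ_2K)
	extern char __bp_harden_hyp_vecs[];

The assembly side can then use a .org check against the fixed size so the
generated vectors are still guaranteed to fit.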
Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas Acked-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 9 ++++----- arch/arm64/include/asm/mmu.h | 4 +++- arch/arm64/kernel/cpu_errata.c | 2 +- arch/arm64/kvm/hyp/hyp-entry.S | 6 ++++-- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 53d846f1bfe7..b5f723cf9599 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -480,7 +480,7 @@ static inline void *kvm_get_hyp_vector(void) int slot = -1; if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) { - vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start)); + vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); slot = data->hyp_vectors_slot; } @@ -509,14 +509,13 @@ static inline int kvm_map_vectors(void) * HBP + HEL2 -> use hardened vertors and use exec mapping */ if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) { - __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start); + __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs); __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); } if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) { - phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start); - unsigned long size = (__bp_harden_hyp_vecs_end - - __bp_harden_hyp_vecs_start); + phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs); + unsigned long size = __BP_HARDEN_HYP_VECS_SZ; /* * Always allocate a spare vector slot, as we don't diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e4d862420bb4..a3324d6ccbfe 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -13,6 +13,7 @@ #define TTBR_ASID_MASK (UL(0xffff) << 48) #define BP_HARDEN_EL2_SLOTS 4 +#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K) #ifndef __ASSEMBLY__ @@ -45,7 +46,8 @@ struct bp_hardening_data { #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \ defined(CONFIG_HARDEN_EL2_VECTORS)) -extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[]; + +extern char __bp_harden_hyp_vecs[]; extern atomic_t arm64_el2_vector_last_slot; #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 703ad0a84f99..0af2201cefda 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -119,7 +119,7 @@ extern char __smccc_workaround_1_smc_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) { - void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); + void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K); int i; for (i = 0; i < SZ_2K; i += 0x80) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 0aea8f9ab23d..1e2ab928a92f 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -312,11 +312,13 @@ alternative_cb_end .endm .align 11 -ENTRY(__bp_harden_hyp_vecs_start) +SYM_CODE_START(__bp_harden_hyp_vecs) .rept BP_HARDEN_EL2_SLOTS generate_vectors .endr -ENTRY(__bp_harden_hyp_vecs_end) +1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ + .org 1b +SYM_CODE_END(__bp_harden_hyp_vecs) .popsection -- cgit v1.2.3 From 4db61fef16a104f94bde24fe163064b98cee6b7c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:39 +0000 Subject: arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations In an effort to clarify and simplify the 
annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC with separate annotations for standard C callable functions, data and code with different calling conventions. Using these for __smccc_workaround_1_smc is more involved than for most symbols as this symbol is annotated quite unusually, rather than just have the explicit symbol we define _start and _end symbols which we then use to compute the length. This does not play at all nicely with the new style macros. Instead define a constant for the size of the function and use that in both the C code and for .org based size checks in the assembly code. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas Acked-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 4 ++++ arch/arm64/kernel/cpu_errata.c | 14 ++++++-------- arch/arm64/kvm/hyp/hyp-entry.S | 6 ++++-- 3 files changed, 14 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 44a243754c1b..7c7eeeaab9fa 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -36,6 +36,8 @@ */ #define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE) +#define __SMCCC_WORKAROUND_1_SMC_SZ 36 + #ifndef __ASSEMBLY__ #include @@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); +extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; + /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ #define __hyp_this_cpu_ptr(sym) \ ({ \ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0af2201cefda..6a2ca339741c 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -11,6 +11,7 @@ #include #include #include +#include #include static bool __maybe_unused @@ -113,9 +114,6 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM_INDIRECT_VECTORS -extern char __smccc_workaround_1_smc_start[]; -extern char __smccc_workaround_1_smc_end[]; - static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) { @@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn, raw_spin_unlock(&bp_lock); } #else -#define __smccc_workaround_1_smc_start NULL -#define __smccc_workaround_1_smc_end NULL - static void install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void) smccc_end = NULL; break; +#if IS_ENABLED(CONFIG_KVM_ARM_HOST) case SMCCC_CONDUIT_SMC: cb = call_smc_arch_workaround_1; - smccc_start = __smccc_workaround_1_smc_start; - smccc_end = __smccc_workaround_1_smc_end; + smccc_start = __smccc_workaround_1_smc; + smccc_end = __smccc_workaround_1_smc + + __SMCCC_WORKAROUND_1_SMC_SZ; break; +#endif default: return -1; diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 1e2ab928a92f..c2a13ab3c471 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -322,7 +322,7 @@ SYM_CODE_END(__bp_harden_hyp_vecs) .popsection -ENTRY(__smccc_workaround_1_smc_start) +SYM_CODE_START(__smccc_workaround_1_smc) esb sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] @@ -332,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start) ldp x2, x3, [sp, #(8 * 0)] ldp x0, x1, [sp, #(8 * 2)] add sp, sp, #(8 * 4) -ENTRY(__smccc_workaround_1_smc_end) +1: .org 
__smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ + .org 1b +SYM_CODE_END(__smccc_workaround_1_smc) #endif -- cgit v1.2.3 From 1242b9b303277425c01e605af96e9fb9fb80c399 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:40 +0000 Subject: arm64: sdei: Annotate SDEI entry points using new style annotations In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. The SDEI entry points are currently annotated as normal functions but are called from non-kernel contexts with non-standard calling convention and should therefore be annotated as such so do so. Signed-off-by: Mark Brown Acked-by: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7439f29946fb..e5d4e30ee242 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -938,7 +938,7 @@ NOKPROBE(ret_from_fork) */ .ltorg .pushsection ".entry.tramp.text", "ax" -ENTRY(__sdei_asm_entry_trampoline) +SYM_CODE_START(__sdei_asm_entry_trampoline) mrs x4, ttbr1_el1 tbz x4, #USER_ASID_BIT, 1f @@ -960,7 +960,7 @@ ENTRY(__sdei_asm_entry_trampoline) ldr x4, =__sdei_asm_handler #endif br x4 -ENDPROC(__sdei_asm_entry_trampoline) +SYM_CODE_END(__sdei_asm_entry_trampoline) NOKPROBE(__sdei_asm_entry_trampoline) /* @@ -970,14 +970,14 @@ NOKPROBE(__sdei_asm_entry_trampoline) * x2: exit_mode * x4: struct sdei_registered_event argument from registration time. */ -ENTRY(__sdei_asm_exit_trampoline) +SYM_CODE_START(__sdei_asm_exit_trampoline) ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] cbnz x4, 1f tramp_unmap_kernel tmp=x4 1: sdei_handler_exit exit_mode=x2 -ENDPROC(__sdei_asm_exit_trampoline) +SYM_CODE_END(__sdei_asm_exit_trampoline) NOKPROBE(__sdei_asm_exit_trampoline) .ltorg .popsection // .entry.tramp.text @@ -1003,7 +1003,7 @@ SYM_DATA_END(__sdei_asm_trampoline_next_handler) * follow SMC-CC. We save (or retrieve) all the registers as the handler may * want them. */ -ENTRY(__sdei_asm_handler) +SYM_CODE_START(__sdei_asm_handler) stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC] stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2] stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3] @@ -1086,6 +1086,6 @@ alternative_else_nop_endif tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline br x5 #endif -ENDPROC(__sdei_asm_handler) +SYM_CODE_END(__sdei_asm_handler) NOKPROBE(__sdei_asm_handler) #endif /* CONFIG_ARM_SDE_INTERFACE */ -- cgit v1.2.3 From c91db232da4848511bc998582d9b47fa8048a492 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:41 +0000 Subject: arm64: vdso: Convert to modern assembler annotations In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. Convert the assembly function in the arm64 VDSO to the new macros. 
Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso/sigreturn.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S index 0723aa398d6e..12324863d5c2 100644 --- a/arch/arm64/kernel/vdso/sigreturn.S +++ b/arch/arm64/kernel/vdso/sigreturn.S @@ -14,7 +14,7 @@ .text nop -ENTRY(__kernel_rt_sigreturn) +SYM_FUNC_START(__kernel_rt_sigreturn) .cfi_startproc .cfi_signal_frame .cfi_def_cfa x29, 0 @@ -23,4 +23,4 @@ ENTRY(__kernel_rt_sigreturn) mov x8, #__NR_rt_sigreturn svc #0 .cfi_endproc -ENDPROC(__kernel_rt_sigreturn) +SYM_FUNC_END(__kernel_rt_sigreturn) -- cgit v1.2.3 From 1157eb8f2ef3f5ff3acc372e36a657a96cd9754c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 18 Feb 2020 19:58:42 +0000 Subject: arm64: vdso32: Convert to modern assembler annotations In an effort to clarify and simplify the annotation of assembly functions new macros have been introduced. These replace ENTRY and ENDPROC with two different annotations for normal functions and those with unusual calling conventions. Use these for the compat VDSO, allowing us to drop the custom ARM_ENTRY() and ARM_ENDPROC() macros. Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso32/sigreturn.S | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S index 1a81277c2d09..620524969696 100644 --- a/arch/arm64/kernel/vdso32/sigreturn.S +++ b/arch/arm64/kernel/vdso32/sigreturn.S @@ -10,13 +10,6 @@ #include #include -#define ARM_ENTRY(name) \ - ENTRY(name) - -#define ARM_ENDPROC(name) \ - .type name, %function; \ - END(name) - .text .arm @@ -24,39 +17,39 @@ .save {r0-r15} .pad #COMPAT_SIGFRAME_REGS_OFFSET nop -ARM_ENTRY(__kernel_sigreturn_arm) +SYM_FUNC_START(__kernel_sigreturn_arm) mov r7, #__NR_compat_sigreturn svc #0 .fnend -ARM_ENDPROC(__kernel_sigreturn_arm) +SYM_FUNC_END(__kernel_sigreturn_arm) .fnstart .save {r0-r15} .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET nop -ARM_ENTRY(__kernel_rt_sigreturn_arm) +SYM_FUNC_START(__kernel_rt_sigreturn_arm) mov r7, #__NR_compat_rt_sigreturn svc #0 .fnend -ARM_ENDPROC(__kernel_rt_sigreturn_arm) +SYM_FUNC_END(__kernel_rt_sigreturn_arm) .thumb .fnstart .save {r0-r15} .pad #COMPAT_SIGFRAME_REGS_OFFSET nop -ARM_ENTRY(__kernel_sigreturn_thumb) +SYM_FUNC_START(__kernel_sigreturn_thumb) mov r7, #__NR_compat_sigreturn svc #0 .fnend -ARM_ENDPROC(__kernel_sigreturn_thumb) +SYM_FUNC_END(__kernel_sigreturn_thumb) .fnstart .save {r0-r15} .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET nop -ARM_ENTRY(__kernel_rt_sigreturn_thumb) +SYM_FUNC_START(__kernel_rt_sigreturn_thumb) mov r7, #__NR_compat_rt_sigreturn svc #0 .fnend -ARM_ENDPROC(__kernel_rt_sigreturn_thumb) +SYM_FUNC_END(__kernel_rt_sigreturn_thumb) -- cgit v1.2.3 From e9c7ddbf8b4b6a291bf3b5bfa7c883235164d9be Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 20 Jan 2020 18:52:29 +0000 Subject: arm64: csum: Optimise IPv6 header checksum Throwing our __uint128_t idioms at csum_ipv6_magic() makes it about 1.3x-2x faster across a range of microarchitecture/compiler combinations. Not much in absolute terms, but every little helps. 
Signed-off-by: Robin Murphy Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/checksum.h | 7 ++++++- arch/arm64/lib/csum.c | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 8d2a7de39744..b6f7bc6da5fb 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -5,7 +5,12 @@ #ifndef __ASM_CHECKSUM_H #define __ASM_CHECKSUM_H -#include +#include + +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); static inline __sum16 csum_fold(__wsum csum) { diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c index 1f82c66b32ea..60eccae2abad 100644 --- a/arch/arm64/lib/csum.c +++ b/arch/arm64/lib/csum.c @@ -124,3 +124,30 @@ unsigned int do_csum(const unsigned char *buff, int len) return sum >> 16; } + +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + __uint128_t src, dst; + u64 sum = (__force u64)csum; + + src = *(const __uint128_t *)saddr->s6_addr; + dst = *(const __uint128_t *)daddr->s6_addr; + + sum += (__force u32)htonl(len); +#ifdef __LITTLE_ENDIAN + sum += (u32)proto << 24; +#else + sum += proto; +#endif + src += (src >> 64) | (src << 64); + dst += (dst >> 64) | (dst << 64); + + sum = accumulate(sum, src >> 64); + sum = accumulate(sum, dst >> 64); + + sum += ((sum >> 32) | (sum << 32)); + return csum_fold((__force __wsum)(sum >> 32)); +} +EXPORT_SYMBOL(csum_ipv6_magic); -- cgit v1.2.3 From b8f58ac7c38af1e22db125622a3a3e9bb9fb9fa2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 Mar 2020 14:20:52 +0900 Subject: arm64: efi: add efi-entry.o to targets instead of extra-$(CONFIG_EFI) efi-entry.o is built on demand for efi-entry.stub.o, so you do not have to repeat $(CONFIG_EFI) here. Adding it to 'targets' is enough. Signed-off-by: Masahiro Yamada Signed-off-by: Catalin Marinas Acked-by: Ard Biesheuvel Reviewed-by: Vincenzo Frascino --- arch/arm64/kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index fc6488660f64..4e5b8ee31442 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -21,7 +21,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ syscall.o -extra-$(CONFIG_EFI) := efi-entry.o +targets += efi-entry.o OBJCOPYFLAGS := --prefix-symbols=__efistub_ $(obj)/%.stub.o: $(obj)/%.o FORCE -- cgit v1.2.3 From 69d113b5c40258aeac15c2eadf6c5dddfbfcf2ae Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Wed, 11 Mar 2020 11:36:53 +0900 Subject: arm64: entry-ftrace.S: Fix missing argument for CONFIG_FUNCTION_GRAPH_TRACER=y Missing argument of another SYM_INNER_LABEL() breaks build for CONFIG_FUNCTION_GRAPH_TRACER=y. 
Fixes: e2d591d29d44 ("arm64: entry-ftrace.S: Convert to modern annotations for assembly functions") Signed-off-by: Kunihiko Hayashi Signed-off-by: Catalin Marinas Acked-by: Mark Brown --- arch/arm64/kernel/entry-ftrace.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 820101821ac4..833d48c9acb5 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -279,7 +279,7 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr); // where xxx can be any kind of tracer. #ifdef CONFIG_FUNCTION_GRAPH_TRACER -SYM_INNER_LABEL(ftrace_graph_call) // ftrace_graph_caller(); +SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller(); nop // If enabled, this will be replaced // "b ftrace_graph_caller" #endif -- cgit v1.2.3 From 9a25136a61179fe79689abf3c3fc67f89ce9ec13 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 10 Mar 2020 16:25:44 -0700 Subject: arm64: Mark call_smc_arch_workaround_1 as __maybe_unused When building allnoconfig: arch/arm64/kernel/cpu_errata.c:174:13: warning: unused function 'call_smc_arch_workaround_1' [-Wunused-function] static void call_smc_arch_workaround_1(void) ^ 1 warning generated. Follow arch/arm and mark this function as __maybe_unused. Fixes: 4db61fef16a1 ("arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations") Signed-off-by: Nathan Chancellor Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu_errata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6a2ca339741c..df56d2295d16 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -171,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn, #include -static void call_smc_arch_workaround_1(void) +static void __maybe_unused call_smc_arch_workaround_1(void) { arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -- cgit v1.2.3 From f0c0d4b74d59809568f560001c8f88e8211334a4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 28 Feb 2020 14:59:42 +0000 Subject: arm64: entry: unmask IRQ in el0_sp() Currently, the EL0 SP alignment handler masks IRQs unnecessarily. It does so due to historic code sharing of the EL0 SP and PC alignment handlers, and branch predictor hardening applicable to the EL0 SP handler. We began masking IRQs in the EL0 SP alignment handler in commit: 5dfc6ed27710c42c ("arm64: entry: Apply BP hardening for high-priority synchronous exception") ... as this shared code with the EL0 PC alignment handler, and branch predictor hardening made it necessary to disable IRQs for early parts of the EL0 PC alignment handler. It was not necessary to mask IRQs during EL0 SP alignment exceptions, but it was not considered harmful to do so. This masking was carried forward into C code in commit: 582f95835a8fc812 ("arm64: entry: convert el0_sync to C") ... where the SP/PC cases were split into separate handlers, and the masking duplicated. Subsequently the EL0 PC alignment handler was refactored to perform branch predictor hardening before unmasking IRQs, in commit: bfe298745afc9548 ("arm64: entry-common: don't touch daif before bp-hardening") ... but the redundant masking of IRQs was not removed from the EL0 SP alignment handler. Let's do so now, and make it interruptible as with most other synchronous exception handlers. 
Signed-off-by: Mark Rutland Cc: Will Deacon Signed-off-by: Catalin Marinas Reviewed-by: James Morse --- arch/arm64/kernel/entry-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index fde59981445c..c839b5bf1904 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -175,7 +175,7 @@ NOKPROBE_SYMBOL(el0_pc); static void notrace el0_sp(struct pt_regs *regs, unsigned long esr) { user_exit_irqoff(); - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(DAIF_PROCCTX); do_sp_pc_abort(regs->sp, esr, regs); } NOKPROBE_SYMBOL(el0_sp); -- cgit v1.2.3 From 1db5cdeccd813330aaab19b3fccab15e1d07fe12 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 21 Feb 2020 14:50:21 +0000 Subject: arm64: cpufeature: add cpus_have_final_cap() When cpus_have_const_cap() was originally introduced it was intended to be safe in hyp context, where it is not safe to access the cpu_hwcaps array as cpus_have_cap() did. For more details see commit: a4023f682739439b ("arm64: Add hypervisor safe helper for checking constant capabilities") We then made use of cpus_have_const_cap() throughout the kernel. Subsequently, we had to defer updating the static_key associated with each capability in order to avoid lockdep complaints. To avoid breaking kernel-wide usage of cpus_have_const_cap(), this was updated to fall back to the cpu_hwcaps array if called before the static_keys were updated. As the kvm hyp code was only called later than this, the fallback is redundant but not functionally harmful. For more details, see commit: 63a1e1c95e60e798 ("arm64/cpufeature: don't use mutex in bringup path") Today we have more users of cpus_have_const_cap() which are only called once the relevant static keys are initialized, and it would be beneficial to avoid the redundant code. To that end, this patch adds a new cpus_have_final_cap(), helper which is intend to be used in code which is only run once capabilities have been finalized, and will never check the cpus_hwcap array. This helps the compiler to generate better code as it no longer needs to generate code to address and test the cpus_hwcap array. To help catch misuse, cpus_have_final_cap() will BUG() if called before capabilities are finalized. In hyp context, BUG() will result in a hyp panic, but the specific BUG() instance will not be identified in the usual way. Comments are added to the various cpus_have_*_cap() helpers to describe the constraints on when they can be used. For clarity cpus_have_cap() is moved above the other helpers. Similarly the helpers are updated to use system_capabilities_finalized() consistently, and this is made __always_inline as required by its new callers. 
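As a rough user-space model of the semantics described above (a sketch only — the real helpers use static keys and the cpu_hwcaps bitmap, not plain booleans):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define NCAPS 64
  static bool caps_finalized;
  static bool hwcaps[NCAPS];              /* stands in for cpu_hwcaps */
  static bool patched_caps[NCAPS];        /* stands in for the patched static keys */

  static bool model_cpus_have_cap(unsigned int n)
  {
          return n < NCAPS && hwcaps[n];          /* always a runtime array check */
  }

  static bool model_cpus_have_const_cap(unsigned int n)
  {
          if (caps_finalized)
                  return patched_caps[n];         /* "patched", no array access */
          return model_cpus_have_cap(n);          /* early fallback */
  }

  static bool model_cpus_have_final_cap(unsigned int n)
  {
          assert(caps_finalized);                 /* BUG() in the real helper */
          return patched_caps[n];
  }

  int main(void)
  {
          hwcaps[3] = true;
          printf("%d\n", model_cpus_have_const_cap(3));   /* 1: falls back to the array */
          caps_finalized = true;
          patched_caps[3] = true;
          printf("%d\n", model_cpus_have_final_cap(3));   /* 1: no fallback needed */
          return 0;
  }

The useful property of cpus_have_final_cap() is precisely that it has no fallback: callers that can only run after finalization get the cheap patched check, and calling it too early fails loudly instead of silently reading the array.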
Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Reviewed-by: Suzuki K Poulose Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 58 ++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..940b2b67b428 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -390,14 +390,16 @@ unsigned long cpu_get_elf_hwcap2(void); #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name)) #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name)) -/* System capability check for constant caps */ -static __always_inline bool __cpus_have_const_cap(int num) +static __always_inline bool system_capabilities_finalized(void) { - if (num >= ARM64_NCAPS) - return false; - return static_branch_unlikely(&cpu_hwcap_keys[num]); + return static_branch_likely(&arm64_const_caps_ready); } +/* + * Test for a capability with a runtime check. + * + * Before the capability is detected, this returns false. + */ static inline bool cpus_have_cap(unsigned int num) { if (num >= ARM64_NCAPS) @@ -405,14 +407,53 @@ static inline bool cpus_have_cap(unsigned int num) return test_bit(num, cpu_hwcaps); } +/* + * Test for a capability without a runtime check. + * + * Before capabilities are finalized, this returns false. + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. + */ +static __always_inline bool __cpus_have_const_cap(int num) +{ + if (num >= ARM64_NCAPS) + return false; + return static_branch_unlikely(&cpu_hwcap_keys[num]); +} + +/* + * Test for a capability, possibly with a runtime check. + * + * Before capabilities are finalized, this behaves as cpus_have_cap(). + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. + */ static __always_inline bool cpus_have_const_cap(int num) { - if (static_branch_likely(&arm64_const_caps_ready)) + if (system_capabilities_finalized()) return __cpus_have_const_cap(num); else return cpus_have_cap(num); } +/* + * Test for a capability without a runtime check. + * + * Before capabilities are finalized, this will BUG(). + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. + */ +static __always_inline bool cpus_have_final_cap(int num) +{ + if (system_capabilities_finalized()) + return __cpus_have_const_cap(num); + else + BUG(); +} + static inline void cpus_set_cap(unsigned int num) { if (num >= ARM64_NCAPS) { @@ -613,11 +654,6 @@ static inline bool system_has_prio_mask_debugging(void) system_uses_irq_prio_masking(); } -static inline bool system_capabilities_finalized(void) -{ - return static_branch_likely(&arm64_const_caps_ready); -} - #define ARM64_BP_HARDEN_UNKNOWN -1 #define ARM64_BP_HARDEN_WA_NEEDED 0 #define ARM64_BP_HARDEN_NOT_REQUIRED 1 -- cgit v1.2.3 From b5475d8caedb71476f999a858ea3f8c24c5f9e50 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 21 Feb 2020 14:50:22 +0000 Subject: arm64: kvm: hyp: use cpus_have_final_cap() The KVM hyp code is only run after system capabilities have been finalized, and thus all const cap checks have been patched. This is noted in in __cpu_init_hyp_mode(), where we BUG() if called too early: | /* | * Call initialization code, and switch to the full blown HYP code. 
| * If the cpucaps haven't been finalized yet, something has gone very | * wrong, and hyp will crash and burn when it uses any | * cpus_have_const_cap() wrapper. | */ Given this, the hyp code can use cpus_have_final_cap() and avoid generating code to check the cpu_hwcaps array, which would be unsafe to run in hyp context. This patch migrate the KVM hyp code to cpus_have_final_cap(), avoiding this redundant code generation, and making it possible to detect if we accidentally invoke this code too early. In the latter case, the BUG() in cpus_have_final_cap() will cause a hyp panic. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: James Morse Cc: Julien Thierry Cc: Suzuki Poulouse Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/switch.c | 14 +++++++------- arch/arm64/kvm/hyp/sysreg-sr.c | 8 ++++---- arch/arm64/kvm/hyp/tlb.c | 8 ++++---- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index dfe8dd172512..27fcdff08dd6 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -127,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) write_sysreg(val, cptr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; isb(); @@ -146,12 +146,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 hcr = vcpu->arch.hcr_el2; - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) + if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) hcr |= HCR_TVM; write_sysreg(hcr, hcr_el2); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); if (has_vhe()) @@ -181,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) { u64 mdcr_el2 = read_sysreg(mdcr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* @@ -328,7 +328,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) * resolve the IPA using the AT instruction. 
*/ if (!(esr & ESR_ELx_S1PTW) && - (cpus_have_const_cap(ARM64_WORKAROUND_834220) || + (cpus_have_final_cap(ARM64_WORKAROUND_834220) || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { if (!__translate_far_to_hpfar(far, &hpfar)) return false; @@ -498,7 +498,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (*exit_code != ARM_EXCEPTION_TRAP) goto exit; - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && + if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 && handle_tx2_tvm(vcpu)) return true; @@ -555,7 +555,7 @@ exit: static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu) { - if (!cpus_have_const_cap(ARM64_SSBD)) + if (!cpus_have_final_cap(ARM64_SSBD)) return false; return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7672a978926c..75b1925763f1 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR); ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2); } @@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); } else if (!ctxt->__hyp_running_vcpu) { @@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) && + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) && ctxt->__hyp_running_vcpu) { /* * Must only be done for host registers, hence the context @@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR); write_sysreg_el2(pstate, SYS_SPSR); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); } diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 92f560e3e1aa..ceaddbe4279f 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, local_irq_save(cxt->flags); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* * For CPUs that are affected by ARM errata 1165522 or 1530923, * we cannot trust stage-1 to be in a correct state at that @@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, struct tlb_inv_context *cxt) { - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* @@ -103,7 +103,7 @@ static void __hyp_text 
__tlb_switch_to_host_vhe(struct kvm *kvm, write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); isb(); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* Restore the registers to what they were */ write_sysreg_el1(cxt->tcr, SYS_TCR); write_sysreg_el1(cxt->sctlr, SYS_SCTLR); @@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, { write_sysreg(0, vttbr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { /* Ensure write of the host VMID */ isb(); /* Restore the host's TCR_EL1 */ -- cgit v1.2.3 From 0c837c4f73d1a31f47e80ebc50bcf23668d895c4 Mon Sep 17 00:00:00 2001 From: 韩科才 Date: Sun, 15 Mar 2020 12:01:19 +0800 Subject: arm64: fix spelling mistake "ca not" -> "cannot" There is a spelling mistake in the comment, Fix it. Signed-off-by: hankecai Signed-off-by: Catalin Marinas --- arch/arm64/lib/strcmp.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 4767540d1b94..4e79566726c8 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -186,7 +186,7 @@ CPU_LE( rev data2, data2 ) * as carry-propagation can corrupt the upper bits if the trailing * bytes in the string contain 0x01. * However, if there is no NUL byte in the dword, we can generate - * the result directly. We ca not just subtract the bytes as the + * the result directly. We cannot just subtract the bytes as the * MSB might be significant. */ CPU_BE( cbnz has_nul, 1f ) -- cgit v1.2.3 From 62b9562a1c46913349e7897c4a39c544f176b3da Mon Sep 17 00:00:00 2001 From: Zheng Wei Date: Fri, 13 Mar 2020 22:54:02 +0800 Subject: arm64: add blank after 'if' add blank after 'if' for armv8_deprecated_init() to make it comply with kernel coding style. Signed-off-by: Zheng Wei Signed-off-by: Catalin Marinas --- arch/arm64/kernel/armv8_deprecated.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 7832b3216370..4cc581af2d96 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -630,7 +630,7 @@ static int __init armv8_deprecated_init(void) register_insn_emulation(&cp15_barrier_ops); if (IS_ENABLED(CONFIG_SETEND_EMULATION)) { - if(system_supports_mixed_endian_el0()) + if (system_supports_mixed_endian_el0()) register_insn_emulation(&setend_ops); else pr_info("setend instruction emulation is not supported on this system\n"); -- cgit v1.2.3 From c2f4afdc3f99403d6d2ef37509c04caa98374620 Mon Sep 17 00:00:00 2001 From: Li Tao Date: Wed, 11 Mar 2020 15:31:55 +0800 Subject: arm64: kexec_file: Fixed code style. Remove unnecessary blank. 
Signed-off-by: Li Tao Signed-off-by: Catalin Marinas --- arch/arm64/kernel/machine_kexec_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index dd3ae8081b38..b40c3b0def92 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -121,7 +121,7 @@ static int setup_dtb(struct kimage *image, /* add kaslr-seed */ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); - if (ret == -FDT_ERR_NOTFOUND) + if (ret == -FDT_ERR_NOTFOUND) ret = 0; else if (ret) goto out; -- cgit v1.2.3 From 24b2cce91f47d19fcc5a2c4c60dbabbd0e30adf1 Mon Sep 17 00:00:00 2001 From: 韩科才 Date: Wed, 11 Mar 2020 14:52:49 +0800 Subject: arm64: remove redundant blank for '=' operator remove redundant blank for '=' operator, it may be more elegant. Signed-off-by: hankecai Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..ce60d1012bfa 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -551,7 +551,7 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) BUG_ON(!reg); - for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { u64 ftr_mask = arm64_ftr_mask(ftrp); s64 ftr_new = arm64_ftr_value(ftrp, new); -- cgit v1.2.3 From d22b115cbfbb7e4a938f9eb6ea77da9ecac3df5a Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 2 Mar 2020 13:03:40 +1100 Subject: arm64/kernel: Simplify __cpu_up() by bailing out early The function __cpu_up() is invoked to bring up the target CPU through the backend, PSCI for example. The nested if statements won't be needed if we bail out early on the following two conditions where the status won't be checked. The code looks simplified in that case. * Error returned from the backend (e.g. PSCI) * The target CPU has been marked as onlined Signed-off-by: Gavin Shan Signed-off-by: Catalin Marinas Reviewed-by: Mark Rutland --- arch/arm64/kernel/smp.c | 79 +++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..2a9d8f39dc58 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -115,60 +115,55 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) update_cpu_boot_status(CPU_MMU_OFF); __flush_dcache_area(&secondary_data, sizeof(secondary_data)); - /* - * Now bring the CPU into our world. - */ + /* Now bring the CPU into our world */ ret = boot_secondary(cpu, idle); - if (ret == 0) { - /* - * CPU was successfully started, wait for it to come online or - * time out. - */ - wait_for_completion_timeout(&cpu_running, - msecs_to_jiffies(5000)); - - if (!cpu_online(cpu)) { - pr_crit("CPU%u: failed to come online\n", cpu); - ret = -EIO; - } - } else { + if (ret) { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); return ret; } + /* + * CPU was successfully started, wait for it to come online or + * time out. 
+ */ + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(5000)); + if (cpu_online(cpu)) + return 0; + + pr_crit("CPU%u: failed to come online\n", cpu); secondary_data.task = NULL; secondary_data.stack = NULL; __flush_dcache_area(&secondary_data, sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); - if (ret && status) { - - if (status == CPU_MMU_OFF) - status = READ_ONCE(__early_cpu_boot_status); + if (status == CPU_MMU_OFF) + status = READ_ONCE(__early_cpu_boot_status); - switch (status & CPU_BOOT_STATUS_MASK) { - default: - pr_err("CPU%u: failed in unknown state : 0x%lx\n", - cpu, status); - cpus_stuck_in_kernel++; - break; - case CPU_KILL_ME: - if (!op_cpu_kill(cpu)) { - pr_crit("CPU%u: died during early boot\n", cpu); - break; - } - pr_crit("CPU%u: may not have shut down cleanly\n", cpu); - /* Fall through */ - case CPU_STUCK_IN_KERNEL: - pr_crit("CPU%u: is stuck in kernel\n", cpu); - if (status & CPU_STUCK_REASON_52_BIT_VA) - pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); - if (status & CPU_STUCK_REASON_NO_GRAN) - pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K); - cpus_stuck_in_kernel++; + switch (status & CPU_BOOT_STATUS_MASK) { + default: + pr_err("CPU%u: failed in unknown state : 0x%lx\n", + cpu, status); + cpus_stuck_in_kernel++; + break; + case CPU_KILL_ME: + if (!op_cpu_kill(cpu)) { + pr_crit("CPU%u: died during early boot\n", cpu); break; - case CPU_PANIC_KERNEL: - panic("CPU%u detected unsupported configuration\n", cpu); } + pr_crit("CPU%u: may not have shut down cleanly\n", cpu); + /* Fall through */ + case CPU_STUCK_IN_KERNEL: + pr_crit("CPU%u: is stuck in kernel\n", cpu); + if (status & CPU_STUCK_REASON_52_BIT_VA) + pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); + if (status & CPU_STUCK_REASON_NO_GRAN) { + pr_crit("CPU%u: does not support %luK granule\n", + cpu, PAGE_SIZE / SZ_1K); + } + cpus_stuck_in_kernel++; + break; + case CPU_PANIC_KERNEL: + panic("CPU%u detected unsupported configuration\n", cpu); } return ret; -- cgit v1.2.3 From c17a290f7e7e59d24b4507736b7b40b0eb5f8f1f Mon Sep 17 00:00:00 2001 From: "glider@google.com" Date: Thu, 12 Mar 2020 16:59:20 +0100 Subject: arm64: define __alloc_zeroed_user_highpage When running the kernel with init_on_alloc=1, calling the default implementation of __alloc_zeroed_user_highpage() from include/linux/highmem.h leads to double-initialization of the allocated page (first by the page allocator, then by clear_user_page(). Calling alloc_page_vma() with __GFP_ZERO, similarly to e.g. x86, seems to be enough to ensure the user page is zeroed only once. 
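For reference, the generic fallback being bypassed looks roughly like this (paraphrased from the generic header of the time, not quoted verbatim):

 | static inline struct page *
 | __alloc_zeroed_user_highpage(gfp_t movableflags,
 |                              struct vm_area_struct *vma, unsigned long vaddr)
 | {
 |         struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, vma, vaddr);
 |
 |         if (page)
 |                 clear_user_highpage(page, vaddr);  /* second full-page write when
 |                                                       init_on_alloc=1 has already
 |                                                       zeroed it in the allocator */
 |         return page;
 | }

With the arch definition added below, the zeroing is requested from the page allocator itself via __GFP_ZERO, so the page is written once rather than twice.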
Signed-off-by: Alexander Potapenko Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/page.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index d39ddb258a04..75d6cd23a679 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -21,6 +21,10 @@ extern void __cpu_copy_user_page(void *to, const void *from, extern void copy_page(void *to, const void *from); extern void clear_page(void *to); +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) -- cgit v1.2.3 From 29227d6ea1572b160e5bea45b3c93a0346444dfa Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 17 Mar 2020 18:22:54 +0000 Subject: arm64: perf: Clean up enable/disable calls Reading this code bordered on painful, what with all the repetition and pointless return values. More fundamentally, dribbling the hardware enables and disables in one bit at a time incurs needless system register overhead for chained events and on reset. We already use bitmask values for the KVM hooks, so consolidate all the register accesses to match, and make a reasonable saving in both source and object code. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 87 +++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 52 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 726cd8bda025..8062d79f4cbb 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -450,86 +450,74 @@ static inline void armv8pmu_write_event_type(struct perf_event *event) } } -static inline int armv8pmu_enable_counter(int idx) +static u32 armv8pmu_event_cnten_mask(struct perf_event *event) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmcntenset_el0); - return idx; + int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + u32 mask = BIT(counter); + + if (armv8pmu_event_is_chained(event)) + mask |= BIT(counter - 1); + return mask; +} + +static inline void armv8pmu_enable_counter(u32 mask) +{ + write_sysreg(mask, pmcntenset_el0); } static inline void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - int idx = event->hw.idx; - u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); - - if (armv8pmu_event_is_chained(event)) - counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + u32 mask = armv8pmu_event_cnten_mask(event); - kvm_set_pmu_events(counter_bits, attr); + kvm_set_pmu_events(mask, attr); /* We rely on the hypervisor switch code to enable guest counters */ - if (!kvm_pmu_counter_deferred(attr)) { - armv8pmu_enable_counter(idx); - if (armv8pmu_event_is_chained(event)) - armv8pmu_enable_counter(idx - 1); - } + if (!kvm_pmu_counter_deferred(attr)) + armv8pmu_enable_counter(mask); } -static inline int armv8pmu_disable_counter(int idx) +static inline void armv8pmu_disable_counter(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmcntenclr_el0); - return idx; + write_sysreg(mask, pmcntenclr_el0); } static inline void armv8pmu_disable_event_counter(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; struct 
perf_event_attr *attr = &event->attr; - int idx = hwc->idx; - u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); + u32 mask = armv8pmu_event_cnten_mask(event); - if (armv8pmu_event_is_chained(event)) - counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); - - kvm_clr_pmu_events(counter_bits); + kvm_clr_pmu_events(mask); /* We rely on the hypervisor switch code to disable guest counters */ - if (!kvm_pmu_counter_deferred(attr)) { - if (armv8pmu_event_is_chained(event)) - armv8pmu_disable_counter(idx - 1); - armv8pmu_disable_counter(idx); - } + if (!kvm_pmu_counter_deferred(attr)) + armv8pmu_disable_counter(mask); } -static inline int armv8pmu_enable_intens(int idx) +static inline void armv8pmu_enable_intens(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmintenset_el1); - return idx; + write_sysreg(mask, pmintenset_el1); } -static inline int armv8pmu_enable_event_irq(struct perf_event *event) +static inline void armv8pmu_enable_event_irq(struct perf_event *event) { - return armv8pmu_enable_intens(event->hw.idx); + u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + armv8pmu_enable_intens(BIT(counter)); } -static inline int armv8pmu_disable_intens(int idx) +static inline void armv8pmu_disable_intens(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmintenclr_el1); + write_sysreg(mask, pmintenclr_el1); isb(); /* Clear the overflow flag in case an interrupt is pending. */ - write_sysreg(BIT(counter), pmovsclr_el0); + write_sysreg(mask, pmovsclr_el0); isb(); - - return idx; } -static inline int armv8pmu_disable_event_irq(struct perf_event *event) +static inline void armv8pmu_disable_event_irq(struct perf_event *event) { - return armv8pmu_disable_intens(event->hw.idx); + u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + armv8pmu_disable_intens(BIT(counter)); } static inline u32 armv8pmu_getreset_flags(void) @@ -814,14 +802,9 @@ static int armv8pmu_filter_match(struct perf_event *event) static void armv8pmu_reset(void *info) { - struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 idx, nb_cnt = cpu_pmu->num_events; - /* The counter and interrupt enable registers are unknown at reset. */ - for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { - armv8pmu_disable_counter(idx); - armv8pmu_disable_intens(idx); - } + armv8pmu_disable_counter(U32_MAX); + armv8pmu_disable_intens(U32_MAX); /* Clear the counters we flip at guest entry/exit */ kvm_clr_pmu_events(U32_MAX); -- cgit v1.2.3 From 8e35aa642ee4dab01b16cc4b2df59d1936f3b3c2 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:50 +0000 Subject: arm64: cpufeature: Extract capped perfmon fields When emulating ID registers there is often a need to cap the version bits of a feature such that the guest will not use features that the host is not aware of. For example, when KVM mediates access to the PMU by emulating register accesses. Let's add a helper that extracts a performance monitors ID field and caps the version to a given value. Fields that identify the version of the Performance Monitors Extension do not follow the standard ID scheme, and instead follow the scheme described in ARM DDI 0487E.a page D13-2825 "Alternative ID scheme used for the Performance Monitors Extension version". The value 0xF means an IMPLEMENTATION DEFINED PMU is present, and values 0x0-OxE can be treated the same as an unsigned field with 0x0 meaning no PMU is present. 
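A quick worked example of the capping arithmetic (a stand-alone sketch re-implementing the helper added below, for illustration only; PMUVer is the 4-bit field at bits [11:8] of ID_AA64DFR0_EL1):

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t cap_perfmon_field(uint64_t features, int field, uint64_t cap)
  {
          uint64_t val = (features >> field) & 0xf;
          uint64_t mask = 0xfULL << field;

          if (val == 0xf)         /* IMPLEMENTATION DEFINED: treat as unimplemented */
                  val = 0;

          if (val > cap)
                  features = (features & ~mask) | ((cap << field) & mask);

          return features;
  }

  int main(void)
  {
          /* An ARMv8.4 PMU (0x5) is reported as ARMv8.1 (0x4)... */
          printf("%llx\n", (unsigned long long)cap_perfmon_field(0x500, 8, 0x4));
          /* ...while an ARMv8.1 PMU is left untouched. */
          printf("%llx\n", (unsigned long long)cap_perfmon_field(0x100, 8, 0x4));
          return 0;
  }

This prints 400 and 100, matching the behaviour relied on by the KVM change that follows.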
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: rework to handle perfmon fields] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..186f4e19207e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -447,6 +447,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field) return cpuid_feature_extract_unsigned_field_width(features, field, 4); } +/* + * Fields that identify the version of the Performance Monitors Extension do + * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825, + * "Alternative ID scheme used for the Performance Monitors Extension version". + */ +static inline u64 __attribute_const__ +cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) +{ + u64 val = cpuid_feature_extract_unsigned_field(features, field); + u64 mask = GENMASK_ULL(field + 3, field); + + /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ + if (val == 0xf) + val = 0; + + if (val > cap) { + features &= ~mask; + features |= (cap << field) & mask; + } + + return features; +} + static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); -- cgit v1.2.3 From c854188ea01062f5a5fd7f05658feb1863774eaa Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:51 +0000 Subject: KVM: arm64: limit PMU version to PMUv3 for ARMv8.1 We currently expose the PMU version of the host to the guest via emulation of the DFR0_EL1 and AA64DFR0_EL1 debug feature registers. However many of the features offered beyond PMUv3 for 8.1 are not supported in KVM. Examples of this include support for the PMMIR registers (added in PMUv3 for ARMv8.4) and 64-bit event counters added in (PMUv3 for ARMv8.5). Let's trap the Debug Feature Registers in order to limit PMUVer/PerfMon in the Debug Feature Registers to PMUv3 for ARMv8.1 to avoid unexpected behaviour. Both ID_AA64DFR0.PMUVer and ID_DFR0.PerfMon follow the "Alternative ID scheme used for the Performance Monitors Extension version" where 0xF means an IMPLEMENTATION DEFINED PMU is implemented, and values 0x0-0xE are treated as with an unsigned field (with 0x0 meaning no PMU is present). As we don't expect to expose an IMPLEMENTATION DEFINED PMU, and our cap is below 0xF, we can treat these fields as unsigned when applying the cap. 
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: make field names consistent, use perfmon cap] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 6 ++++++ arch/arm64/kvm/sys_regs.c | 10 ++++++++++ 2 files changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b91570ff9db1..d8f1eed070f0 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,6 +702,12 @@ #define ID_AA64DFR0_TRACEVER_SHIFT 4 #define ID_AA64DFR0_DEBUGVER_SHIFT 0 +#define ID_AA64DFR0_PMUVER_8_1 0x4 + +#define ID_DFR0_PERFMON_SHIFT 24 + +#define ID_DFR0_PERFMON_8_1 0x4 + #define ID_ISAR5_RDM_SHIFT 24 #define ID_ISAR5_CRC32_SHIFT 16 #define ID_ISAR5_SHA2_SHIFT 12 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3e909b117f0c..b0a3e8976b90 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1085,6 +1085,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (0xfUL << ID_AA64ISAR1_API_SHIFT) | (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); + } else if (id == SYS_ID_AA64DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_AA64DFR0_PMUVER_SHIFT, + ID_AA64DFR0_PMUVER_8_1); + } else if (id == SYS_ID_DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_DFR0_PERFMON_SHIFT, + ID_DFR0_PERFMON_8_1); } return val; -- cgit v1.2.3 From 8673e02e58410e6c4cefa499efa846286e45a991 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:52 +0000 Subject: arm64: perf: Add support for ARMv8.5-PMU 64-bit counters At present ARMv8 event counters are limited to 32-bits, though by using the CHAIN event it's possible to combine adjacent counters to achieve 64-bits. The perf config1:0 bit can be set to use such a configuration. With the introduction of ARMv8.5-PMU support, all event counters can now be used as 64-bit counters. Let's enable 64-bit event counters where support exists. Unless the user sets config1:0 we will adjust the counter value such that it overflows upon 32-bit overflow. This follows the same behaviour as the cycle counter which has always been (and remains) 64-bits. 
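The bias itself is plain arithmetic. A stand-alone sketch (illustrative, not kernel code) showing that writing a 32-bit start value with the top 32 bits set makes the 64-bit counter overflow after exactly as many events as a true 32-bit counter would:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t start  = 0xfffffff0;   /* want an overflow interrupt after 16 events */
          uint64_t biased = (uint64_t)start | 0xffffffff00000000ULL;

          uint64_t to_64bit_overflow = (uint64_t)0 - biased;    /* 2^64 - biased */
          uint32_t to_32bit_overflow = (uint32_t)(0u - start);  /* 2^32 - start  */

          printf("%llu %u\n", (unsigned long long)to_64bit_overflow,
                 to_32bit_overflow);      /* both print 16 */
          return 0;
  }

On the read side the same upper bits are stripped again, which is what the unbias step in the diff below does, so users of the 32-bit view never see the bias.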
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: fix ID field names, compare with 8.5 value] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/perf_event.h | 3 +- arch/arm64/include/asm/sysreg.h | 4 ++ arch/arm64/kernel/perf_event.c | 87 ++++++++++++++++++++++++++++++------- include/linux/perf/arm_pmu.h | 1 + 4 files changed, 78 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 2bdbc79bbd01..e7765b62c712 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -176,9 +176,10 @@ #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ +#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ #define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ /* * PMOVSR: counters overflow flag status reg diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d8f1eed070f0..9b66c5b5b36f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,7 +702,11 @@ #define ID_AA64DFR0_TRACEVER_SHIFT 4 #define ID_AA64DFR0_DEBUGVER_SHIFT 0 +#define ID_AA64DFR0_PMUVER_8_0 0x1 #define ID_AA64DFR0_PMUVER_8_1 0x4 +#define ID_AA64DFR0_PMUVER_8_4 0x5 +#define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 8062d79f4cbb..4d7879484cec 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -285,6 +285,17 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) + +/* + * We unconditionally enable ARMv8.5-PMU long event counter support + * (64-bit events) where supported. Indicate if this arm_pmu has long + * event counter support. + */ +static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) +{ + return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); +} + /* * We must chain two programmable counters for 64 bit events, * except when we have allocated the 64bit cycle counter (for CPU @@ -294,9 +305,11 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { static inline bool armv8pmu_event_is_chained(struct perf_event *event) { int idx = event->hw.idx; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); return !WARN_ON(idx < 0) && armv8pmu_event_is_64bit(event) && + !armv8pmu_has_long_event(cpu_pmu) && (idx != ARMV8_IDX_CYCLE_COUNTER); } @@ -345,7 +358,7 @@ static inline void armv8pmu_select_counter(int idx) isb(); } -static inline u32 armv8pmu_read_evcntr(int idx) +static inline u64 armv8pmu_read_evcntr(int idx) { armv8pmu_select_counter(idx); return read_sysreg(pmxevcntr_el0); @@ -362,6 +375,44 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) return val; } +/* + * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP + * is set the event counters also become 64-bit counters. Unless the + * user has requested a long counter (attr.config1) then we want to + * interrupt upon 32-bit overflow - we achieve this by applying a bias. 
+ */ +static bool armv8pmu_event_needs_bias(struct perf_event *event) +{ + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (armv8pmu_event_is_64bit(event)) + return false; + + if (armv8pmu_has_long_event(cpu_pmu) || + idx == ARMV8_IDX_CYCLE_COUNTER) + return true; + + return false; +} + +static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value) +{ + if (armv8pmu_event_needs_bias(event)) + value |= GENMASK(63, 32); + + return value; +} + +static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value) +{ + if (armv8pmu_event_needs_bias(event)) + value &= ~GENMASK(63, 32); + + return value; +} + static u64 armv8pmu_read_counter(struct perf_event *event) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); @@ -377,10 +428,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event) else value = armv8pmu_read_hw_counter(event); - return value; + return armv8pmu_unbias_long_counter(event, value); } -static inline void armv8pmu_write_evcntr(int idx, u32 value) +static inline void armv8pmu_write_evcntr(int idx, u64 value) { armv8pmu_select_counter(idx); write_sysreg(value, pmxevcntr_el0); @@ -405,20 +456,14 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + value = armv8pmu_bias_long_counter(event, value); + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); - else if (idx == ARMV8_IDX_CYCLE_COUNTER) { - /* - * The cycles counter is really a 64-bit counter. - * When treating it as a 32-bit counter, we only count - * the lower 32 bits, and set the upper 32-bits so that - * we get an interrupt upon 32-bit overflow. - */ - if (!armv8pmu_event_is_64bit(event)) - value |= 0xffffffff00000000ULL; + else if (idx == ARMV8_IDX_CYCLE_COUNTER) write_sysreg(value, pmccntr_el0); - } else + else armv8pmu_write_hw_counter(event, value); } @@ -731,7 +776,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* * Otherwise use events counters */ - if (armv8pmu_event_is_64bit(event)) + if (armv8pmu_event_is_64bit(event) && + !armv8pmu_has_long_event(cpu_pmu)) return armv8pmu_get_chain_idx(cpuc, cpu_pmu); else return armv8pmu_get_single_idx(cpuc, cpu_pmu); @@ -802,6 +848,9 @@ static int armv8pmu_filter_match(struct perf_event *event) static void armv8pmu_reset(void *info) { + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; + u32 pmcr; + /* The counter and interrupt enable registers are unknown at reset. */ armv8pmu_disable_counter(U32_MAX); armv8pmu_disable_intens(U32_MAX); @@ -813,8 +862,13 @@ static void armv8pmu_reset(void *info) * Initialize & Reset PMNC. Request overflow interrupt for * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 
*/ - armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | - ARMV8_PMU_PMCR_LC); + pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC; + + /* Enable long event counter support where available */ + if (armv8pmu_has_long_event(cpu_pmu)) + pmcr |= ARMV8_PMU_PMCR_LP; + + armv8pmu_pmcr_write(pmcr); } static int __armv8_pmuv3_map_event(struct perf_event *event, @@ -897,6 +951,7 @@ static void __armv8pmu_probe_pmu(void *info) if (pmuver == 0xf || pmuver == 0) return; + cpu_pmu->pmuver = pmuver; probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 71f525a35ac2..5b616dde9a4c 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -80,6 +80,7 @@ struct arm_pmu { struct pmu pmu; cpumask_t supported_cpus; char *name; + int pmuver; irqreturn_t (*handle_irq)(struct arm_pmu *pmu); void (*enable)(struct perf_event *event); void (*disable)(struct perf_event *event); -- cgit v1.2.3 From 3ff047f6971d3c3aefd1b8972ac94a4301a70902 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:48 +0530 Subject: arm64: cpufeature: Fix meta-capability cpufeature check Some existing/future meta cpucaps match need the presence of individual cpucaps. Currently the individual cpucaps checks it via an array based flag and this introduces dependency on the array entry order. This limitation exists only for system scope cpufeature. This patch introduces an internal helper function (__system_matches_cap) to invoke the matching handler for system scope. This helper has to be used during a narrow window when, - The system wide safe registers are set with all the SMP CPUs and, - The SYSTEM_FEATURE cpu_hwcaps may not have been set. Normal users should use the existing cpus_have_{const_}cap() global function. Suggested-by: Suzuki K Poulose Suggested-by: Catalin Marinas Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..4f2e95e6ecd1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); +static bool __system_matches_cap(unsigned int n); + /* * NOTE: Any changes to the visibility of features should be kept in * sync with the documentation of the CPU feature register ABI. @@ -2146,6 +2148,23 @@ bool this_cpu_has_cap(unsigned int n) return false; } +/* + * This helper function is used in a narrow window when, + * - The system wide safe registers are set with all the SMP CPUs and, + * - The SYSTEM_FEATURE cpu_hwcaps may not have been set. + * In all other cases cpus_have_{const_}cap() should be used. 
+ */ +static bool __system_matches_cap(unsigned int n) +{ + if (n < ARM64_NCAPS) { + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; + + if (cap) + return cap->matches(cap, SCOPE_SYSTEM); + } + return false; +} + void cpu_set_feature(unsigned int num) { WARN_ON(num >= MAX_CPU_FEATURES); @@ -2218,7 +2237,7 @@ void __init setup_cpu_features(void) static bool __maybe_unused cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) { - return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO)); + return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO)); } static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) -- cgit v1.2.3 From cfef06bd0686a578aa53e039c9aec0b1a5581d3b Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:49 +0530 Subject: arm64: cpufeature: add pointer auth meta-capabilities To enable pointer auth for the kernel, we're going to need to check for the presence of address auth and generic auth using alternative_if. We currently have two cpucaps for each, but alternative_if needs to check a single cpucap. So define meta-capabilities that are present when either of the current two capabilities is present. Leave the existing four cpucaps in place, as they are still needed to check for mismatched systems where one CPU has the architected algorithm but another has the IMP DEF algorithm. Note, the meta-capabilities were present before but were removed in commit a56005d32105 ("arm64: cpufeature: Reduce number of pointer auth CPU caps from 6 to 4") and commit 1e013d06120c ("arm64: cpufeature: Rework ptr auth hwcaps using multi_entry_cap_matches"), as they were not needed then. Note, unlike before, the current patch checks the cpucap values directly, instead of reading the CPU ID register value. 
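A minimal model of the composition (illustrative only): the meta-capability is just the OR of the two algorithm-specific capabilities, so later code can key a single check — for example an alternative — off one cap:

  #include <stdbool.h>
  #include <stdio.h>

  enum { CAP_ADDR_AUTH_ARCH, CAP_ADDR_AUTH_IMP_DEF, CAP_ADDR_AUTH, MODEL_NCAPS };

  static bool caps[MODEL_NCAPS];

  static bool has_address_auth(void)
  {
          /* mirrors has_address_auth() in the diff below: either variant will do */
          return caps[CAP_ADDR_AUTH_ARCH] || caps[CAP_ADDR_AUTH_IMP_DEF];
  }

  int main(void)
  {
          caps[CAP_ADDR_AUTH_IMP_DEF] = true;     /* say only the IMP DEF algorithm exists */
          caps[CAP_ADDR_AUTH] = has_address_auth();
          printf("ARM64_HAS_ADDRESS_AUTH: %d\n", caps[CAP_ADDR_AUTH]);
          return 0;
  }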
Reviewed-by: Suzuki K Poulose Reviewed-by: Kees Cook Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: commit message and macro rebase, use __system_matches_cap] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpucaps.h | 4 +++- arch/arm64/include/asm/cpufeature.h | 6 ++---- arch/arm64/kernel/cpufeature.c | 25 ++++++++++++++++++++++++- 3 files changed, 29 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 865e0253fc1e..72e4e0580ddb 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -58,7 +58,9 @@ #define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 #define ARM64_HAS_E0PD 49 #define ARM64_HAS_RNG 50 +#define ARM64_HAS_ADDRESS_AUTH 51 +#define ARM64_HAS_GENERIC_AUTH 52 -#define ARM64_NCAPS 51 +#define ARM64_NCAPS 53 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..8c8048372c8e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -590,15 +590,13 @@ static inline bool system_supports_cnp(void) static inline bool system_supports_address_auth(void) { return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && - (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || - cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF)); + cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH); } static inline bool system_supports_generic_auth(void) { return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && - (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || - cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); + cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH); } static inline bool system_uses_irq_prio_masking(void) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4f2e95e6ecd1..01f50f043831 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1323,6 +1323,20 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); } + +static bool has_address_auth(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || + __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF); +} + +static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || + __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF); +} #endif /* CONFIG_ARM64_PTR_AUTH */ #ifdef CONFIG_ARM64_E0PD @@ -1600,7 +1614,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR1_APA_SHIFT, .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, .matches = has_cpuid_feature, - .cpu_enable = cpu_enable_address_auth, }, { .desc = "Address authentication (IMP DEF algorithm)", @@ -1611,6 +1624,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR1_API_SHIFT, .min_field_value = ID_AA64ISAR1_API_IMP_DEF, .matches = has_cpuid_feature, + }, + { + .capability = ARM64_HAS_ADDRESS_AUTH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_address_auth, .cpu_enable = cpu_enable_address_auth, }, { @@ -1633,6 +1651,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF, .matches = has_cpuid_feature, }, + { + .capability = ARM64_HAS_GENERIC_AUTH, + 
.type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_generic_auth, + }, #endif /* CONFIG_ARM64_PTR_AUTH */ #ifdef CONFIG_ARM64_PSEUDO_NMI { -- cgit v1.2.3 From 91a1b6ccff323e60615e3118eceb2d8cbc4f69ab Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:50 +0530 Subject: arm64: rename ptrauth key structures to be user-specific We currently enable ptrauth for userspace, but do not use it within the kernel. We're going to enable it for the kernel, and will need to manage a separate set of ptrauth keys for the kernel. We currently keep all 5 keys in struct ptrauth_keys. However, as the kernel will only need to use 1 key, it is a bit wasteful to allocate a whole ptrauth_keys struct for every thread. Therefore, a subsequent patch will define a separate struct, with only 1 key, for the kernel. In preparation for that, rename the existing struct (and associated macros and functions) to reflect that they are specific to userspace. Acked-by: Catalin Marinas Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Re-positioned the patch to reduce the diff] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pointer_auth.h | 12 ++++++------ arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/pointer_auth.c | 8 ++++---- arch/arm64/kernel/ptrace.c | 16 ++++++++-------- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 7a24bad1a58b..799b079e69a5 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -22,7 +22,7 @@ struct ptrauth_key { * We give each process its own keys, which are shared by all threads. The keys * are inherited upon fork(), and reinitialised upon exec*(). 
*/ -struct ptrauth_keys { +struct ptrauth_keys_user { struct ptrauth_key apia; struct ptrauth_key apib; struct ptrauth_key apda; @@ -30,7 +30,7 @@ struct ptrauth_keys { struct ptrauth_key apga; }; -static inline void ptrauth_keys_init(struct ptrauth_keys *keys) +static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { get_random_bytes(&keys->apia, sizeof(keys->apia)); @@ -50,7 +50,7 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_switch(struct ptrauth_keys *keys) +static inline void ptrauth_keys_switch_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { __ptrauth_key_install(APIA, keys->apia); @@ -80,12 +80,12 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) #define ptrauth_thread_init_user(tsk) \ do { \ struct task_struct *__ptiu_tsk = (tsk); \ - ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \ - ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \ + ptrauth_keys_init_user(&__ptiu_tsk->thread.keys_user); \ + ptrauth_keys_switch_user(&__ptiu_tsk->thread.keys_user); \ } while (0) #define ptrauth_thread_switch(tsk) \ - ptrauth_keys_switch(&(tsk)->thread.keys_user) + ptrauth_keys_switch_user(&(tsk)->thread.keys_user) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5ba63204d078..496a92873290 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -146,7 +146,7 @@ struct thread_struct { unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ #ifdef CONFIG_ARM64_PTR_AUTH - struct ptrauth_keys keys_user; + struct ptrauth_keys_user keys_user; #endif }; diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c index c507b584259d..af5a638207f8 100644 --- a/arch/arm64/kernel/pointer_auth.c +++ b/arch/arm64/kernel/pointer_auth.c @@ -9,7 +9,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) { - struct ptrauth_keys *keys = &tsk->thread.keys_user; + struct ptrauth_keys_user *keys = &tsk->thread.keys_user; unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY; unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY; @@ -18,8 +18,8 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) return -EINVAL; if (!arg) { - ptrauth_keys_init(keys); - ptrauth_keys_switch(keys); + ptrauth_keys_init_user(keys); + ptrauth_keys_switch_user(keys); return 0; } @@ -41,7 +41,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (arg & PR_PAC_APGAKEY) get_random_bytes(&keys->apga, sizeof(keys->apga)); - ptrauth_keys_switch(keys); + ptrauth_keys_switch_user(keys); return 0; } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index cd6e5fa48b9c..b3d3005d9515 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -999,7 +999,7 @@ static struct ptrauth_key pac_key_from_user(__uint128_t ukey) } static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, - const struct ptrauth_keys *keys) + const struct ptrauth_keys_user *keys) { ukeys->apiakey = pac_key_to_user(&keys->apia); ukeys->apibkey = pac_key_to_user(&keys->apib); @@ -1007,7 +1007,7 @@ static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, ukeys->apdbkey = pac_key_to_user(&keys->apdb); } -static void 
pac_address_keys_from_user(struct ptrauth_keys *keys, +static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, const struct user_pac_address_keys *ukeys) { keys->apia = pac_key_from_user(ukeys->apiakey); @@ -1021,7 +1021,7 @@ static int pac_address_keys_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_address_keys user_keys; if (!system_supports_address_auth()) @@ -1038,7 +1038,7 @@ static int pac_address_keys_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_address_keys user_keys; int ret; @@ -1056,12 +1056,12 @@ static int pac_address_keys_set(struct task_struct *target, } static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, - const struct ptrauth_keys *keys) + const struct ptrauth_keys_user *keys) { ukeys->apgakey = pac_key_to_user(&keys->apga); } -static void pac_generic_keys_from_user(struct ptrauth_keys *keys, +static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, const struct user_pac_generic_keys *ukeys) { keys->apga = pac_key_from_user(ukeys->apgakey); @@ -1072,7 +1072,7 @@ static int pac_generic_keys_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_generic_keys user_keys; if (!system_supports_generic_auth()) @@ -1089,7 +1089,7 @@ static int pac_generic_keys_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_generic_keys user_keys; int ret; -- cgit v1.2.3 From be129842566599f2c6f8fbba277c098802cd4b3d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:51 +0530 Subject: arm64: install user ptrauth keys at kernel exit time As we're going to enable pointer auth within the kernel and use a different APIAKey for the kernel itself, so move the user APIAKey switch to EL0 exception return. The other 4 keys could remain switched during task switch, but are also moved to keep things consistent. 
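A rough user-space model of the change in key lifetime (illustrative, not kernel code): the "hardware" key registers are now reloaded only on the way back to EL0, from whichever task is current at that point, rather than at every context switch:

  #include <stdio.h>

  struct keys { unsigned long apia; };
  struct task { const char *name; struct keys keys_user; };

  static struct task a = { "A", { 0x1111 } }, b = { "B", { 0x2222 } };
  static struct task *current_task = &a;
  static struct keys hw_keys;             /* stands in for the APIAKey registers etc. */

  static void switch_to(struct task *next)
  {
          current_task = next;            /* __switch_to(): no key install any more */
  }

  static void ret_to_user(void)
  {
          hw_keys = current_task->keys_user;      /* kernel exit: install user keys */
          printf("eret to %s with APIA key %#lx\n",
                 current_task->name, hw_keys.apia);
  }

  int main(void)
  {
          switch_to(&b);          /* any number of switches can happen in the kernel... */
          switch_to(&a);
          ret_to_user();          /* ...but the keys are written once, at exit time */
          return 0;
  }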
Reviewed-by: Kees Cook Reviewed-by: James Morse Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: commit msg, re-positioned the patch, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm_pointer_auth.h | 49 +++++++++++++++++++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 23 +-------------- arch/arm64/kernel/asm-offsets.c | 11 +++++++ arch/arm64/kernel/entry.S | 3 ++ arch/arm64/kernel/pointer_auth.c | 3 -- arch/arm64/kernel/process.c | 1 - 6 files changed, 64 insertions(+), 26 deletions(-) create mode 100644 arch/arm64/include/asm/asm_pointer_auth.h (limited to 'arch') diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h new file mode 100644 index 000000000000..3482348ec07f --- /dev/null +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ASM_POINTER_AUTH_H +#define __ASM_ASM_POINTER_AUTH_H + +#include +#include +#include +#include + +#ifdef CONFIG_ARM64_PTR_AUTH +/* + * thread.keys_user.ap* as offset exceeds the #imm offset range + * so use the base value of ldp as thread.keys_user and offset as + * thread.keys_user.ap*. + */ + .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + mov \tmp1, #THREAD_KEYS_USER + add \tmp1, \tsk, \tmp1 +alternative_if_not ARM64_HAS_ADDRESS_AUTH + b .Laddr_auth_skip_\@ +alternative_else_nop_endif + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA] + msr_s SYS_APIAKEYLO_EL1, \tmp2 + msr_s SYS_APIAKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB] + msr_s SYS_APIBKEYLO_EL1, \tmp2 + msr_s SYS_APIBKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA] + msr_s SYS_APDAKEYLO_EL1, \tmp2 + msr_s SYS_APDAKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB] + msr_s SYS_APDBKEYLO_EL1, \tmp2 + msr_s SYS_APDBKEYHI_EL1, \tmp3 +.Laddr_auth_skip_\@: +alternative_if ARM64_HAS_GENERIC_AUTH + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA] + msr_s SYS_APGAKEYLO_EL1, \tmp2 + msr_s SYS_APGAKEYHI_EL1, \tmp3 +alternative_else_nop_endif + .endm + +#else /* CONFIG_ARM64_PTR_AUTH */ + + .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + .endm + +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#endif /* __ASM_ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 799b079e69a5..dabe026ca8ca 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -50,19 +50,6 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_switch_user(struct ptrauth_keys_user *keys) -{ - if (system_supports_address_auth()) { - __ptrauth_key_install(APIA, keys->apia); - __ptrauth_key_install(APIB, keys->apib); - __ptrauth_key_install(APDA, keys->apda); - __ptrauth_key_install(APDB, keys->apdb); - } - - if (system_supports_generic_auth()) - __ptrauth_key_install(APGA, keys->apga); -} - extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -78,20 +65,12 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) } #define ptrauth_thread_init_user(tsk) \ -do { \ - struct task_struct *__ptiu_tsk = (tsk); \ - ptrauth_keys_init_user(&__ptiu_tsk->thread.keys_user); \ - ptrauth_keys_switch_user(&__ptiu_tsk->thread.keys_user); \ -} while (0) - -#define ptrauth_thread_switch(tsk) \ - ptrauth_keys_switch_user(&(tsk)->thread.keys_user) + 
ptrauth_keys_init_user(&(tsk)->thread.keys_user) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) -#define ptrauth_thread_switch(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index a5bdce8af65b..7b1ea2aece58 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -40,6 +40,9 @@ int main(void) #endif BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); +#endif BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); @@ -127,6 +130,14 @@ int main(void) #ifdef CONFIG_ARM_SDE_INTERFACE DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs)); DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority)); +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia)); + DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib)); + DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda)); + DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb)); + DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga)); + BLANK(); #endif return 0; } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9461d812ae27..684e475bfda0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -341,6 +342,8 @@ alternative_else_nop_endif msr cntkctl_el1, x1 4: #endif + ptrauth_keys_install_user tsk, x0, x1, x2 + apply_ssbd 0, x0, x1 .endif diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c index af5a638207f8..1e77736a4f66 100644 --- a/arch/arm64/kernel/pointer_auth.c +++ b/arch/arm64/kernel/pointer_auth.c @@ -19,7 +19,6 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (!arg) { ptrauth_keys_init_user(keys); - ptrauth_keys_switch_user(keys); return 0; } @@ -41,7 +40,5 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (arg & PR_PAC_APGAKEY) get_random_bytes(&keys->apga, sizeof(keys->apga)); - ptrauth_keys_switch_user(keys); - return 0; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 00626057a384..6140e791bf92 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -512,7 +512,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); uao_thread_switch(next); - ptrauth_thread_switch(next); ssbs_thread_switch(next); /* -- cgit v1.2.3 From df3551011b8188e7a9291a66c2c0a04c4eb9d8eb Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:52 +0530 Subject: arm64: ptrauth: Add bootup/runtime flags for __cpu_setup This patch allows __cpu_setup to be invoked with one of these flags, ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME. This is required as some cpufeatures need different handling during different scenarios. The input parameter in x0 is preserved till the end to be used inside this function. 
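In C terms the new calling convention looks roughly like the sketch below; cpu_setup_model() and its per-case comments are illustrative only, the real logic stays in assembly in proc.S.

/* Flag values from the asm/smp.h hunk in this patch. */
#define ARM64_CPU_BOOT_PRIMARY   1	/* passed by stext (boot CPU) */
#define ARM64_CPU_BOOT_SECONDARY 2	/* passed by secondary_startup */
#define ARM64_CPU_RUNTIME        3	/* passed by cpu_resume */

static unsigned long cpu_setup_model(int boot_mode)
{
	/*
	 * The common setup (TLB invalidate, CPACR/MDSCR, MAIR, TCR, ...)
	 * runs for every caller; boot_mode (x0) is kept live so feature
	 * code near the end of __cpu_setup can tell the scenarios apart.
	 */
	switch (boot_mode) {
	case ARM64_CPU_BOOT_PRIMARY:
		/* one-time decisions only the cold-booting boot CPU may take */
		break;
	case ARM64_CPU_BOOT_SECONDARY:
		/* follow whatever the boot CPU already decided */
		break;
	case ARM64_CPU_RUNTIME:
		/* resume path: skip boot-time-only setup */
		break;
	}

	return 0;	/* stands in for SCTLR_EL1_SET, still returned in x0 */
}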
There should be no functional change with this patch and is useful for the subsequent ptrauth patch which utilizes it. Some upcoming arm cpufeatures can also utilize these flags. Suggested-by: James Morse Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Reviewed-by: James Morse Reviewed-by: Suzuki K Poulose Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/smp.h | 8 ++++++++ arch/arm64/kernel/head.S | 2 ++ arch/arm64/kernel/sleep.S | 2 ++ arch/arm64/mm/proc.S | 26 +++++++++++++++----------- 4 files changed, 27 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index a0c8a0b65259..8d66497d8157 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -23,6 +23,14 @@ #define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT) #define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT) +/* Possible options for __cpu_setup */ +/* Option to setup primary cpu */ +#define ARM64_CPU_BOOT_PRIMARY (1) +/* Option to setup secondary cpus */ +#define ARM64_CPU_BOOT_SECONDARY (2) +/* Option to setup cpus for different cpu run time services */ +#define ARM64_CPU_RUNTIME (3) + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..797573fe0e9c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -118,6 +118,7 @@ ENTRY(stext) * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ + mov x0, #ARM64_CPU_BOOT_PRIMARY bl __cpu_setup // initialise processor b __primary_switch ENDPROC(stext) @@ -712,6 +713,7 @@ secondary_startup: * Common entry point for secondary CPUs. */ bl __cpu_secondary_check52bitva + mov x0, #ARM64_CPU_BOOT_SECONDARY bl __cpu_setup // initialise processor adrp x1, swapper_pg_dir bl __enable_mmu diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index f5b04dd8a710..7b2f2e650c44 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -3,6 +3,7 @@ #include #include #include +#include .text /* @@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly + mov x0, #ARM64_CPU_RUNTIME bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ adrp x1, swapper_pg_dir diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..ea0db1744c29 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -408,30 +408,30 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings) /* * __cpu_setup * - * Initialise the processor for turning the MMU on. Return in x0 the - * value of the SCTLR_EL1 register. + * Initialise the processor for turning the MMU on. + * + * Input: + * x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME. + * Output: + * Return in x0 the value of the SCTLR_EL1 register. 
*/ .pushsection ".idmap.text", "awx" SYM_FUNC_START(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh - mov x0, #3 << 20 - msr cpacr_el1, x0 // Enable FP/ASIMD - mov x0, #1 << 12 // Reset mdscr_el1 and disable - msr mdscr_el1, x0 // access to the DCC from EL0 + mov x1, #3 << 20 + msr cpacr_el1, x1 // Enable FP/ASIMD + mov x1, #1 << 12 // Reset mdscr_el1 and disable + msr mdscr_el1, x1 // access to the DCC from EL0 isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu - reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_pmuserenr_el0 x1 // Disable PMU access from EL0 /* * Memory region attributes */ mov_q x5, MAIR_EL1_SET msr mair_el1, x5 - /* - * Prepare SCTLR - */ - mov_q x0, SCTLR_EL1_SET /* * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * both user and kernel. @@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup) 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 + /* + * Prepare SCTLR + */ + mov_q x0, SCTLR_EL1_SET ret // return to head.S SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From 8c176e1625a66d35362d4eac7ceab55c1229b481 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:53 +0530 Subject: arm64: cpufeature: Move cpu capability helpers inside C file These helpers are used only by functions inside cpufeature.c and hence makes sense to be moved from cpufeature.h to cpufeature.c as they are not expected to be used globally. This change helps in reducing the header file size as well as to add future cpu capability types without confusion. Only a cpu capability type macro is sufficient to expose those capabilities globally. Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 12 ------------ arch/arm64/kernel/cpufeature.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8c8048372c8e..cea3c1cdf252 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -340,18 +340,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) return cap->type & ARM64_CPUCAP_SCOPE_MASK; } -static inline bool -cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) -{ - return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); -} - -static inline bool -cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) -{ - return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); -} - /* * Generic helper for handling capabilties with multiple (match,enable) pairs * of call backs, sharing the same capability bit. 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 01f50f043831..04ecf1cc3306 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1363,6 +1363,19 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, } #endif +/* Internal helper functions to match cpu capability type */ +static bool +cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); +} + +static bool +cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", -- cgit v1.2.3 From deeaac5175a577cbbe1a2319903781d0a7ef7720 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:54 +0530 Subject: arm64: cpufeature: handle conflicts based on capability Each system capability can be of either boot, local, or system scope, depending on when the state of the capability is finalized. When we detect a conflict on a late CPU, we either offline the CPU or panic the system. We currently always panic if the conflict is caused by a boot scope capability, and offline the CPU if the conflict is caused by a local or system scope capability. We're going to want to add a new capability (for pointer authentication) which needs to be boot scope but doesn't need to panic the system when a conflict is detected. So add a new flag to specify whether the capability requires the system to panic or not. Current boot scope capabilities are updated to set the flag, so there should be no functional change as a result of this patch. Signed-off-by: Amit Daniel Kachhap Signed-off-by: Kristina Martsenko Reviewed-by: Vincenzo Frascino Reviewed-by: Suzuki K Poulose Reviewed-by: Kees Cook Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 12 ++++++++++-- arch/arm64/kernel/cpufeature.c | 29 +++++++++++++++-------------- 2 files changed, 25 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index cea3c1cdf252..46388e65bbcd 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; * In some non-typical cases either both (a) and (b), or neither, * should be permitted. This can be described by including neither * or both flags in the capability's type field. + * + * In case of a conflict, the CPU is prevented from booting. If the + * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability, + * then a kernel panic is triggered. */ @@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4)) /* Is it safe for a late CPU to miss this capability when system has it */ #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5)) +/* Panic when a conflict is detected */ +#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6)) /* * CPU errata workarounds that need to be enabled at boot time if one or @@ -279,9 +285,11 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; /* * CPU feature used early in the boot based on the boot CPU. All secondary - * CPUs must match the state of the capability as detected by the boot CPU. + * CPUs must match the state of the capability as detected by the boot CPU. 
In + * case of a conflict, a kernel panic is triggered. */ -#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU +#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \ + (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT) struct arm64_cpu_capabilities { const char *desc; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04ecf1cc3306..d6033f45c3cd 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1376,6 +1376,12 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); } +static bool +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -2018,10 +2024,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask) * Run through the list of capabilities to check for conflicts. * If the system has already detected a capability, take necessary * action on this CPU. - * - * Returns "false" on conflicts. */ -static bool verify_local_cpu_caps(u16 scope_mask) +static void verify_local_cpu_caps(u16 scope_mask) { int i; bool cpu_has_cap, system_has_cap; @@ -2066,10 +2070,12 @@ static bool verify_local_cpu_caps(u16 scope_mask) pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", smp_processor_id(), caps->capability, caps->desc, system_has_cap, cpu_has_cap); - return false; - } - return true; + if (cpucap_panic_on_conflict(caps)) + cpu_panic_kernel(); + else + cpu_die_early(); + } } /* @@ -2079,12 +2085,8 @@ static bool verify_local_cpu_caps(u16 scope_mask) static void check_early_cpu_features(void) { verify_cpu_asid_bits(); - /* - * Early features are used by the kernel already. If there - * is a conflict, we cannot proceed further. - */ - if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) - cpu_panic_kernel(); + + verify_local_cpu_caps(SCOPE_BOOT_CPU); } static void @@ -2132,8 +2134,7 @@ static void verify_local_cpu_capabilities(void) * check_early_cpu_features(), as they need to be verified * on all secondary CPUs. */ - if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) - cpu_die_early(); + verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); verify_local_elf_hwcaps(arm64_elf_hwcaps); -- cgit v1.2.3 From 6982934e19f8ebb4152ba77308facdb1a38533f9 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:55 +0530 Subject: arm64: enable ptrauth earlier When the kernel is compiled with pointer auth instructions, the boot CPU needs to start using address auth very early, so change the cpucap to account for this. Pointer auth must be enabled before we call C functions, because it is not possible to enter a function with pointer auth disabled and exit it with pointer auth enabled. Note, mismatches between architected and IMPDEF algorithms will still be caught by the cpufeature framework (the separate *_ARCH and *_IMP_DEF cpucaps). Note the change in behavior: if the boot CPU has address auth and a late CPU does not, then the late CPU is parked by the cpufeature framework. This is possible as kernel will only have NOP space intructions for PAC so such mismatched late cpu will silently ignore those instructions in C functions. Also, if the boot CPU does not have address auth and the late CPU has then the late cpu will still boot but with ptrauth feature disabled. 
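To make the parking behaviour above concrete, a compressed C model of the conflict check from the previous patch (verify_local_cpu_caps()) is given below; cpu_die_early() and cpu_panic_kernel() are empty stubs standing in for the real kernel functions.

#include <stdbool.h>
#include <stdint.h>

#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((uint16_t)(1U << 4))
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((uint16_t)(1U << 5))
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((uint16_t)(1U << 6))

static void cpu_die_early(void)    { /* park this CPU */ }
static void cpu_panic_kernel(void) { /* bring the whole system down */ }

/* Run on a late CPU for each capability the system has already finalised. */
static void verify_one_cap(uint16_t type, bool system_has, bool cpu_has)
{
	bool conflict =
		(system_has && !cpu_has &&
		 !(type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)) ||
		(!system_has && cpu_has &&
		 !(type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU));

	if (!conflict)
		return;

	if (type & ARM64_CPUCAP_PANIC_ON_CONFLICT)
		cpu_panic_kernel();	/* strict boot CPU features */
	else
		cpu_die_early();	/* e.g. ARM64_HAS_ADDRESS_AUTH */
}

With the type used below for the address auth capabilities (ARM64_CPUCAP_BOOT_CPU_FEATURE, i.e. boot CPU scope plus PERMITTED_FOR_LATE_CPU), a late CPU that lacks address auth hits the cpu_die_early() branch, while a late CPU that has it when the boot CPU did not is simply allowed to boot with the feature unused.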
Leave generic authentication as a "system scope" cpucap for now, since initially the kernel will only use address authentication. Reviewed-by: Kees Cook Reviewed-by: Suzuki K Poulose Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Re-worked ptrauth setup logic, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 6 ++++++ arch/arm64/include/asm/cpufeature.h | 9 +++++++++ arch/arm64/kernel/cpufeature.c | 13 +++---------- arch/arm64/mm/proc.S | 31 +++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..87e2cbb76930 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1515,6 +1515,12 @@ config ARM64_PTR_AUTH be enabled. However, KVM guest also require VHE mode and hence CONFIG_ARM64_VHE=y option to use this feature. + If the feature is present on the boot CPU but not on a late CPU, then + the late CPU will be parked. Also, if the boot CPU does not have + address auth and the late CPU has then the late CPU will still boot + but with the feature disabled. On such a system, this option should + not be selected. + endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 46388e65bbcd..2b5a088053e4 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -291,6 +291,15 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \ (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT) +/* + * CPU feature used early in the boot based on the boot CPU. It is safe for a + * late CPU to have this feature even though the boot CPU hasn't enabled it, + * although the feature will not be used by Linux in this case. If the boot CPU + * has enabled this feature already, then every late CPU must have it. 
+ */ +#define ARM64_CPUCAP_BOOT_CPU_FEATURE \ + (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) + struct arm64_cpu_capabilities { const char *desc; u16 capability; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d6033f45c3cd..f6c0cb755107 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1318,12 +1318,6 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) #endif /* CONFIG_ARM64_RAS_EXTN */ #ifdef CONFIG_ARM64_PTR_AUTH -static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) -{ - sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | - SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); -} - static bool has_address_auth(const struct arm64_cpu_capabilities *entry, int __unused) { @@ -1627,7 +1621,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Address authentication (architected algorithm)", .capability = ARM64_HAS_ADDRESS_AUTH_ARCH, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .sys_reg = SYS_ID_AA64ISAR1_EL1, .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_APA_SHIFT, @@ -1637,7 +1631,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Address authentication (IMP DEF algorithm)", .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .sys_reg = SYS_ID_AA64ISAR1_EL1, .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_API_SHIFT, @@ -1646,9 +1640,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }, { .capability = ARM64_HAS_ADDRESS_AUTH, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .matches = has_address_auth, - .cpu_enable = cpu_enable_address_auth, }, { .desc = "Generic authentication (architected algorithm)", diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index ea0db1744c29..4cf19a26af2d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K @@ -468,9 +469,39 @@ SYM_FUNC_START(__cpu_setup) 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 + mov x1, x0 /* * Prepare SCTLR */ mov_q x0, SCTLR_EL1_SET + +#ifdef CONFIG_ARM64_PTR_AUTH + /* No ptrauth setup for run time cpus */ + cmp x1, #ARM64_CPU_RUNTIME + b.eq 3f + + /* Check if the CPU supports ptrauth */ + mrs x2, id_aa64isar1_el1 + ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8 + cbz x2, 3f + + msr_s SYS_APIAKEYLO_EL1, xzr + msr_s SYS_APIAKEYHI_EL1, xzr + + /* Just enable ptrauth for primary cpu */ + cmp x1, #ARM64_CPU_BOOT_PRIMARY + b.eq 2f + + /* if !system_supports_address_auth() then skip enable */ +alternative_if_not ARM64_HAS_ADDRESS_AUTH + b 3f +alternative_else_nop_endif + +2: /* Enable ptrauth instructions */ + ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB + orr x0, x0, x2 +3: +#endif ret // return to head.S SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From 33e45234987ea3ed4b05fc512f4441696478f12d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:56 +0530 Subject: arm64: initialize and switch ptrauth kernel keys Set up keys to use pointer authentication within the kernel. The kernel will be compiled with APIAKey instructions, the other keys are currently unused. Each task is given its own APIAKey, which is initialized during fork. 
The key is changed during context switch and on kernel entry from EL0. The keys for idle threads need to be set before calling any C functions, because it is not possible to enter and exit a function with different keys. Reviewed-by: Kees Cook Reviewed-by: Catalin Marinas Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Modified secondary cores key structure, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm_pointer_auth.h | 14 ++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 13 +++++++++++++ arch/arm64/include/asm/processor.h | 1 + arch/arm64/include/asm/smp.h | 4 ++++ arch/arm64/kernel/asm-offsets.c | 5 +++++ arch/arm64/kernel/entry.S | 3 +++ arch/arm64/kernel/process.c | 2 ++ arch/arm64/kernel/smp.c | 8 ++++++++ arch/arm64/mm/proc.S | 12 ++++++++++++ 9 files changed, 62 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index 3482348ec07f..d3f4aee42851 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -39,11 +39,25 @@ alternative_if ARM64_HAS_GENERIC_AUTH alternative_else_nop_endif .endm + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 +alternative_if ARM64_HAS_ADDRESS_AUTH + mov \tmp1, #THREAD_KEYS_KERNEL + add \tmp1, \tsk, \tmp1 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA] + msr_s SYS_APIAKEYLO_EL1, \tmp2 + msr_s SYS_APIAKEYHI_EL1, \tmp3 + isb +alternative_else_nop_endif + .endm + #else /* CONFIG_ARM64_PTR_AUTH */ .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 .endm + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 + .endm + #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index dabe026ca8ca..aa956ca5f2c2 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -30,6 +30,10 @@ struct ptrauth_keys_user { struct ptrauth_key apga; }; +struct ptrauth_keys_kernel { + struct ptrauth_key apia; +}; + static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { @@ -50,6 +54,12 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) +static inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) +{ + if (system_supports_address_auth()) + get_random_bytes(&keys->apia, sizeof(keys->apia)); +} + extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -66,11 +76,14 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) #define ptrauth_thread_init_user(tsk) \ ptrauth_keys_init_user(&(tsk)->thread.keys_user) +#define ptrauth_thread_init_kernel(tsk) \ + ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) +#define ptrauth_thread_init_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 496a92873290..4c77da5dc819 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -147,6 +147,7 @@ struct thread_struct { struct debug_info debug; /* debugging */ #ifdef CONFIG_ARM64_PTR_AUTH struct ptrauth_keys_user keys_user; + struct ptrauth_keys_kernel 
keys_kernel; #endif }; diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 8d66497d8157..40d5ba029615 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -38,6 +38,7 @@ #include #include #include +#include DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); @@ -95,6 +96,9 @@ asmlinkage void secondary_start_kernel(void); struct secondary_data { void *stack; struct task_struct *task; +#ifdef CONFIG_ARM64_PTR_AUTH + struct ptrauth_keys_kernel ptrauth_key; +#endif long status; }; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7b1ea2aece58..9981a0a5a87f 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -42,6 +42,7 @@ int main(void) DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); + DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); #endif BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); @@ -91,6 +92,9 @@ int main(void) BLANK(); DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(CPU_BOOT_PTRAUTH_KEY, offsetof(struct secondary_data, ptrauth_key)); +#endif BLANK(); #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); @@ -137,6 +141,7 @@ int main(void) DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda)); DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb)); DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga)); + DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); BLANK(); #endif return 0; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 684e475bfda0..3dad2d000e3c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -178,6 +178,7 @@ alternative_cb_end apply_ssbd 1, x22, x23 + ptrauth_keys_install_kernel tsk, x20, x22, x23 .else add x21, sp, #S_FRAME_SIZE get_current_task tsk @@ -342,6 +343,7 @@ alternative_else_nop_endif msr cntkctl_el1, x1 4: #endif + /* No kernel C function calls after this as user keys are set. 
*/ ptrauth_keys_install_user tsk, x0, x1, x2 apply_ssbd 0, x0, x1 @@ -898,6 +900,7 @@ ENTRY(cpu_switch_to) ldr lr, [x8] mov sp, x9 msr sp_el0, x1 + ptrauth_keys_install_kernel x1, x8, x9, x10 ret ENDPROC(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6140e791bf92..7db0302bec00 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -376,6 +376,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, */ fpsimd_flush_task_state(p); + ptrauth_thread_init_kernel(p); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..08903413f106 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -112,6 +112,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) */ secondary_data.task = idle; secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; +#if defined(CONFIG_ARM64_PTR_AUTH) + secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo; + secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi; +#endif update_cpu_boot_status(CPU_MMU_OFF); __flush_dcache_area(&secondary_data, sizeof(secondary_data)); @@ -138,6 +142,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.task = NULL; secondary_data.stack = NULL; +#if defined(CONFIG_ARM64_PTR_AUTH) + secondary_data.ptrauth_key.apia.lo = 0; + secondary_data.ptrauth_key.apia.hi = 0; +#endif __flush_dcache_area(&secondary_data, sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); if (ret && status) { diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 4cf19a26af2d..5a11a895e923 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -485,6 +485,10 @@ SYM_FUNC_START(__cpu_setup) ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8 cbz x2, 3f + /* + * The primary cpu keys are reset here and can be + * re-initialised with some proper values later. + */ msr_s SYS_APIAKEYLO_EL1, xzr msr_s SYS_APIAKEYHI_EL1, xzr @@ -497,6 +501,14 @@ alternative_if_not ARM64_HAS_ADDRESS_AUTH b 3f alternative_else_nop_endif + /* Install ptrauth key for secondary cpus */ + adr_l x2, secondary_data + ldr x3, [x2, #CPU_BOOT_TASK] // get secondary_data.task + cbz x3, 2f // check for slow booting cpus + ldp x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY] + msr_s SYS_APIAKEYLO_EL1, x3 + msr_s SYS_APIAKEYHI_EL1, x4 + 2: /* Enable ptrauth instructions */ ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB -- cgit v1.2.3 From 28321582334c261c13b20d7efe634e610b4c100b Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:57 +0530 Subject: arm64: initialize ptrauth keys for kernel booting task This patch uses the existing boot_init_stack_canary arch function to initialize the ptrauth keys for the booting task in the primary core. The requirement here is that it should be always inline and the caller must never return. As pointer authentication too detects a subset of stack corruption so it makes sense to place this code here. Both pointer authentication and stack canary codes are protected by their respective config option. 
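In condensed C form, the resulting arch boot_init_stack_canary() reads roughly as below; this is the stackprotector.h change of this patch with the canary body elided and the reasoning spelled out in comments, not a standalone program.

static __always_inline void boot_init_stack_canary(void)
{
#if defined(CONFIG_STACKPROTECTOR)
	/*
	 * Existing stack canary initialisation, now conditionally
	 * compiled so the function is still usable when only ptrauth
	 * needs it.
	 */
#endif
	/*
	 * Generate and install the boot task's kernel APIA key. This has
	 * to be __always_inline: with return-address signing, an
	 * out-of-line call would sign its LR with the old key on entry
	 * and authenticate it with the new key on return. For the same
	 * reason the caller must never return, as its own return address
	 * was signed before the key changed underneath it.
	 */
	ptrauth_thread_init_kernel(current);
	ptrauth_thread_switch_kernel(current);
}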
Suggested-by: Ard Biesheuvel Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pointer_auth.h | 11 ++++++++++- arch/arm64/include/asm/stackprotector.h | 5 +++++ include/linux/stackprotector.h | 2 +- 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index aa956ca5f2c2..833d3f948de0 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -54,12 +54,18 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) +static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) { if (system_supports_address_auth()) get_random_bytes(&keys->apia, sizeof(keys->apia)); } +static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys) +{ + if (system_supports_address_auth()) + __ptrauth_key_install(APIA, keys->apia); +} + extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -78,12 +84,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) ptrauth_keys_init_user(&(tsk)->thread.keys_user) #define ptrauth_thread_init_kernel(tsk) \ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) +#define ptrauth_thread_switch_kernel(tsk) \ + ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) #define ptrauth_thread_init_kernel(tsk) +#define ptrauth_thread_switch_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index 5884a2b02827..7263e0bac680 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -15,6 +15,7 @@ #include #include +#include extern unsigned long __stack_chk_guard; @@ -26,6 +27,7 @@ extern unsigned long __stack_chk_guard; */ static __always_inline void boot_init_stack_canary(void) { +#if defined(CONFIG_STACKPROTECTOR) unsigned long canary; /* Try to get a semi random initial value. */ @@ -36,6 +38,9 @@ static __always_inline void boot_init_stack_canary(void) current->stack_canary = canary; if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) __stack_chk_guard = current->stack_canary; +#endif + ptrauth_thread_init_kernel(current); + ptrauth_thread_switch_kernel(current); } #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h index 6b792d080eee..4c678c4fec58 100644 --- a/include/linux/stackprotector.h +++ b/include/linux/stackprotector.h @@ -6,7 +6,7 @@ #include #include -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH) # include #else static inline void boot_init_stack_canary(void) -- cgit v1.2.3 From 689eae42afd7a916634146edca38463769969184 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:58 +0530 Subject: arm64: mask PAC bits of __builtin_return_address Functions like vmap() record how much memory has been allocated by their callers, and callers are identified using __builtin_return_address(). Once the kernel is using pointer-auth the return address will be signed. 
This means it will not match any kernel symbol, and will vary between threads even for the same caller. The output of /proc/vmallocinfo in this case may look like, 0x(____ptrval____)-0x(____ptrval____) 20480 0x86e28000100e7c60 pages=4 vmalloc N0=4 0x(____ptrval____)-0x(____ptrval____) 20480 0x86e28000100e7c60 pages=4 vmalloc N0=4 0x(____ptrval____)-0x(____ptrval____) 20480 0xc5c78000100e7c60 pages=4 vmalloc N0=4 The above three 64bit values should be the same symbol name and not different LR values. Use the pre-processor to add logic to clear the PAC to __builtin_return_address() callers. This patch adds a new file asm/compiler.h and is transitively included via include/compiler_types.h on the compiler command line so it is guaranteed to be loaded and the users of this macro will not find a wrong version. Helper macros ptrauth_kernel_pac_mask/ptrauth_clear_pac are created for this purpose and added in this file. Existing macro ptrauth_user_pac_mask moved from asm/pointer_auth.h. Signed-off-by: Amit Daniel Kachhap Reviewed-by: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/compiler.h | 24 ++++++++++++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 9 +-------- 3 files changed, 26 insertions(+), 8 deletions(-) create mode 100644 arch/arm64/include/asm/compiler.h (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 87e2cbb76930..115ceea0293e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -118,6 +118,7 @@ config ARM64 select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE + select HAVE_ARCH_COMPILER_H select HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h new file mode 100644 index 000000000000..eece20d2c55f --- /dev/null +++ b/arch/arm64/include/asm/compiler.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_COMPILER_H +#define __ASM_COMPILER_H + +#if defined(CONFIG_ARM64_PTR_AUTH) + +/* + * The EL0/EL1 pointer bits used by a pointer authentication code. + * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply. + */ +#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual) +#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual) + +/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */ +#define ptrauth_clear_pac(ptr) \ + ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \ + (ptr & ~ptrauth_user_pac_mask())) + +#define __builtin_return_address(val) \ + (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val))) + +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#endif /* __ASM_COMPILER_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 833d3f948de0..70c47156e54b 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -68,16 +68,9 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); -/* - * The EL0 pointer bits used by a pointer authentication code. - * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. 
- */ -#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual) - -/* Only valid for EL0 TTBR0 instruction pointers */ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) { - return ptr & ~ptrauth_user_pac_mask(); + return ptrauth_clear_pac(ptr); } #define ptrauth_thread_init_user(tsk) \ -- cgit v1.2.3 From 04ad99a0b160450ae615e41b839e444eccb5c99b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2020 14:34:59 +0530 Subject: arm64: unwind: strip PAC from kernel addresses When we enable pointer authentication in the kernel, LR values saved to the stack will have a PAC which we must strip in order to retrieve the real return address. Strip PACs when unwinding the stack in order to account for this. When function graph tracer is used with patchable-function-entry then return_to_handler will also have pac bits so strip it too. Reviewed-by: Kees Cook Acked-by: Catalin Marinas Reviewed-by: James Morse Signed-off-by: Mark Rutland Signed-off-by: Kristina Martsenko [Amit: Re-position ptrauth_strip_insn_pac, comment] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index a336cb124320..139679c745bf 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -86,7 +87,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && - (frame->pc == (unsigned long)return_to_handler)) { + (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) { struct ftrace_ret_stack *ret_stack; /* * This is a case where function graph tracer has @@ -101,6 +102,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + frame->pc = ptrauth_strip_insn_pac(frame->pc); + /* * Frames created upon entry from EL0 have NULL FP and PC values, so * don't bother reporting these. Frames created by __noreturn functions -- cgit v1.2.3 From cdcb61ae4c56f9edcd1eca4c2df444f3f5e96e1d Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:35:00 +0530 Subject: arm64: __show_regs: strip PAC from lr in printk lr is printed with %pS which will try to find an entry in kallsyms. After enabling pointer authentication, this match will fail due to PAC present in the lr. Strip PAC from the lr to display the correct symbol name. 
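As a standalone illustration of the stripping used here and in the unwinder, the mask logic from the new asm/compiler.h can be modelled as follows; vabits_actual is pinned to 48 purely as an example value, and the test pointer is the PAC-carrying value from the earlier vmallocinfo output.

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static const unsigned int vabits_actual = 48;	/* example VA size */

/* Bits that may hold a PAC in kernel (TTBR1) and user (TTBR0) pointers. */
#define kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
#define user_pac_mask()		GENMASK_ULL(54, vabits_actual)

/* Mirrors ptrauth_clear_pac(): bit 55 distinguishes kernel from user. */
static unsigned long long clear_pac(unsigned long long ptr)
{
	return (ptr & (1ULL << 55)) ? (ptr | kernel_pac_mask())
				    : (ptr & ~user_pac_mask());
}

int main(void)
{
	/*
	 * 0x86e28000100e7c60 -> 0xffff8000100e7c60, a canonical kernel VA
	 * that %pS can resolve to a symbol again.
	 */
	printf("%#llx\n", clear_pac(0x86e28000100e7c60ULL));
	return 0;
}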
Suggested-by: James Morse Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Acked-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 7db0302bec00..cacae291ba27 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -262,7 +262,7 @@ void __show_regs(struct pt_regs *regs) if (!user_mode(regs)) { printk("pc : %pS\n", (void *)regs->pc); - printk("lr : %pS\n", (void *)lr); + printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr)); } else { printk("pc : %016llx\n", regs->pc); printk("lr : %016llx\n", lr); -- cgit v1.2.3 From e51f5f56dd69e009e22af8a4354dce0817a7addb Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:35:01 +0530 Subject: arm64: suspend: restore the kernel ptrauth keys This patch restores the kernel keys from current task during cpu resume after the mmu is turned on and ptrauth is enabled. A flag is added in macro ptrauth_keys_install_kernel to check if isb instruction needs to be executed. Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm_pointer_auth.h | 6 ++++-- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/mm/proc.S | 2 ++ 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index d3f4aee42851..ce2a8486992b 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -39,14 +39,16 @@ alternative_if ARM64_HAS_GENERIC_AUTH alternative_else_nop_endif .endm - .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 + .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3 alternative_if ARM64_HAS_ADDRESS_AUTH mov \tmp1, #THREAD_KEYS_KERNEL add \tmp1, \tsk, \tmp1 ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA] msr_s SYS_APIAKEYLO_EL1, \tmp2 msr_s SYS_APIAKEYHI_EL1, \tmp3 + .if \sync == 1 isb + .endif alternative_else_nop_endif .endm @@ -55,7 +57,7 @@ alternative_else_nop_endif .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 .endm - .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 + .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3 .endm #endif /* CONFIG_ARM64_PTR_AUTH */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 3dad2d000e3c..6273d7bed962 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -178,7 +178,7 @@ alternative_cb_end apply_ssbd 1, x22, x23 - ptrauth_keys_install_kernel tsk, x20, x22, x23 + ptrauth_keys_install_kernel tsk, 1, x20, x22, x23 .else add x21, sp, #S_FRAME_SIZE get_current_task tsk @@ -900,7 +900,7 @@ ENTRY(cpu_switch_to) ldr lr, [x8] mov sp, x9 msr sp_el0, x1 - ptrauth_keys_install_kernel x1, x8, x9, x10 + ptrauth_keys_install_kernel x1, 1, x8, x9, x10 ret ENDPROC(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 5a11a895e923..4450dc83cf5c 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr alternative_else_nop_endif + ptrauth_keys_install_kernel x14, 0, x1, x2, x3 isb ret SYM_FUNC_END(cpu_do_resume) -- cgit v1.2.3 From 74afda4016a7437e6e425c3370e4b93b47be8ddf Mon Sep 17 00:00:00 
2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:35:03 +0530 Subject: arm64: compile the kernel with ptrauth return address signing Compile all functions with two ptrauth instructions: PACIASP in the prologue to sign the return address, and AUTIASP in the epilogue to authenticate the return address (from the stack). If authentication fails, the return will cause an instruction abort to be taken, followed by an oops and killing the task. This should help protect the kernel against attacks using return-oriented programming. As ptrauth protects the return address, it can also serve as a replacement for CONFIG_STACKPROTECTOR, although note that it does not protect other parts of the stack. The new instructions are in the HINT encoding space, so on a system without ptrauth they execute as NOPs. CONFIG_ARM64_PTR_AUTH now not only enables ptrauth for userspace and KVM guests, but also automatically builds the kernel with ptrauth instructions if the compiler supports it. If there is no compiler support, we do not warn that the kernel was built without ptrauth instructions. GCC 7 and 8 support the -msign-return-address option, while GCC 9 deprecates that option and replaces it with -mbranch-protection. Support both options. Clang uses an external assembler hence this patch makes sure that the correct parameters (-march=armv8.3-a) are passed down to help it recognize the ptrauth instructions. Ftrace function tracer works properly with Ptrauth only when patchable-function-entry feature is present and is ensured by the Kconfig dependency. Cc: Catalin Marinas Cc: Will Deacon Cc: Masahiro Yamada Reviewed-by: Kees Cook Reviewed-by: Vincenzo Frascino # not co-dev parts Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Cover leaf function, comments, Ftrace Kconfig] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 24 +++++++++++++++++++++++- arch/arm64/Makefile | 11 +++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 115ceea0293e..155041a5f0e4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1499,6 +1499,8 @@ config ARM64_PTR_AUTH bool "Enable support for pointer authentication" default y depends on !KVM || ARM64_VHE + depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC + depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Pointer authentication (part of the ARMv8.3 Extensions) provides instructions for signing and authenticating pointers against secret @@ -1506,11 +1508,17 @@ config ARM64_PTR_AUTH and other attacks. This option enables these instructions at EL0 (i.e. for userspace). - Choosing this option will cause the kernel to initialise secret keys for each process at exec() time, with these keys being context-switched along with the process. + If the compiler supports the -mbranch-protection or + -msign-return-address flag (e.g. GCC 7 or later), then this option + will also cause the kernel itself to be compiled with return address + protection. In this case, and if the target hardware is known to + support pointer authentication, then CONFIG_STACKPROTECTOR can be + disabled with minimal loss of protection. + The feature is detected at runtime. If the feature is not present in hardware it will not be advertised to userspace/KVM guest nor will it be enabled. 
However, KVM guest also require VHE mode and hence @@ -1522,6 +1530,20 @@ config ARM64_PTR_AUTH but with the feature disabled. On such a system, this option should not be selected. + This feature works with FUNCTION_GRAPH_TRACER option only if + DYNAMIC_FTRACE_WITH_REGS is enabled. + +config CC_HAS_BRANCH_PROT_PAC_RET + # GCC 9 or later, clang 8 or later + def_bool $(cc-option,-mbranch-protection=pac-ret+leaf) + +config CC_HAS_SIGN_RETURN_ADDRESS + # GCC 7, 8 + def_bool $(cc-option,-msign-return-address=all) + +config AS_HAS_PAC + def_bool $(as-option,-Wa$(comma)-march=armv8.3-a) + endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index dca1a97751ab..f15f92ba53e6 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -65,6 +65,17 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif +ifeq ($(CONFIG_ARM64_PTR_AUTH),y) +branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all +branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf +# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the +# compiler to generate them and consequently to break the single image contract +# we pass it only to the assembler. This option is utilized only in case of non +# integrated assemblers. +branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a +KBUILD_CFLAGS += $(branch-prot-flags-y) +endif + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ -- cgit v1.2.3 From 3b446c7d27ddd06342901bb35211363f6944291a Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 19 Mar 2020 11:19:51 -0700 Subject: arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH Clang relies on GNU as from binutils to assemble the Linux kernel, currently. A recent patch to enable the armv8.3-a extension for pointer authentication checked for compiler support of the relevant flags. Everything works with binutils 2.34+, but for older versions we observe assembler errors: /tmp/vgettimeofday-36a54b.s: Assembler messages: /tmp/vgettimeofday-36a54b.s:40: Error: unknown pseudo-op: `.cfi_negate_ra_state' When compiling with Clang, require the assembler to support .cfi_negate_ra_state directives, in order to support CONFIG_ARM64_PTR_AUTH. 
Link: https://github.com/ClangBuiltLinux/linux/issues/938 Signed-off-by: Nick Desaulniers Signed-off-by: Catalin Marinas Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor --- arch/arm64/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 155041a5f0e4..c876afce10f3 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1500,6 +1500,7 @@ config ARM64_PTR_AUTH default y depends on !KVM || ARM64_VHE depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC + depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE) depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Pointer authentication (part of the ARMv8.3 Extensions) provides @@ -1544,6 +1545,9 @@ config CC_HAS_SIGN_RETURN_ADDRESS config AS_HAS_PAC def_bool $(as-option,-Wa$(comma)-march=armv8.3-a) +config AS_HAS_CFI_NEGATE_RA_STATE + def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n) + endmenu menu "ARMv8.5 architectural features" -- cgit v1.2.3 From dc374b477f9233296bf2da31854e486e1781169e Mon Sep 17 00:00:00 2001 From: Remi Denis-Courmont Date: Wed, 4 Mar 2020 11:36:31 +0200 Subject: arm64: use mov_q instead of literal ldr In practice, this requires only 2 instructions, or even only 1 for the idmap_pg_dir size (with 4 or 64 KiB pages). Only the MAIR values needed more than 2 instructions and it was already converted to mov_q by 95b3f74bec203804658e17f86fe20755bb8abcb9. Signed-off-by: Remi Denis-Courmont Signed-off-by: Catalin Marinas Acked-by: Mark Rutland --- arch/arm64/kernel/cpu-reset.S | 2 +- arch/arm64/kernel/hyp-stub.S | 2 +- arch/arm64/kernel/relocate_kernel.S | 4 +--- arch/arm64/kvm/hyp-init.S | 10 ++++------ arch/arm64/mm/proc.S | 2 +- 5 files changed, 8 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 32c7bf858dd9..38087b4c0432 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -32,7 +32,7 @@ ENTRY(__cpu_soft_restart) /* Clear sctlr_el1 flags. */ mrs x12, sctlr_el1 - ldr x13, =SCTLR_ELx_FLAGS + mov_q x13, SCTLR_ELx_FLAGS bic x12, x12, x13 pre_disable_mmu_workaround msr sctlr_el1, x12 diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 73d46070b315..e473ead806ed 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -63,7 +63,7 @@ el1_sync: beq 9f // Nothing to reset! /* Someone called kvm_call_hyp() against the hyp-stub... */ - ldr x0, =HVC_STUB_ERR + mov_q x0, HVC_STUB_ERR eret 9: mov x0, xzr diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index c1d7db71a726..c40ce496c78b 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -41,7 +41,7 @@ ENTRY(arm64_relocate_new_kernel) cmp x0, #CurrentEL_EL2 b.ne 1f mrs x0, sctlr_el2 - ldr x1, =SCTLR_ELx_FLAGS + mov_q x1, SCTLR_ELx_FLAGS bic x0, x0, x1 pre_disable_mmu_workaround msr sctlr_el2, x0 @@ -113,8 +113,6 @@ ENTRY(arm64_relocate_new_kernel) ENDPROC(arm64_relocate_new_kernel) -.ltorg - .align 3 /* To keep the 64-bit values below naturally aligned. 
*/ .Lcopy_end: diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 84f32cf5abc7..6e6ed5581eed 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -60,7 +60,7 @@ alternative_else_nop_endif msr ttbr0_el2, x4 mrs x4, tcr_el1 - ldr x5, =TCR_EL2_MASK + mov_q x5, TCR_EL2_MASK and x4, x4, x5 mov x5, #TCR_EL2_RES1 orr x4, x4, x5 @@ -102,7 +102,7 @@ alternative_else_nop_endif * as well as the EE bit on BE. Drop the A flag since the compiler * is allowed to generate unaligned accesses. */ - ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) + mov_q x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) CPU_BE( orr x4, x4, #SCTLR_ELx_EE) msr sctlr_el2, x4 isb @@ -142,7 +142,7 @@ reset: * case we coming via HVC_SOFT_RESTART. */ mrs x5, sctlr_el2 - ldr x6, =SCTLR_ELx_FLAGS + mov_q x6, SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc pre_disable_mmu_workaround msr sctlr_el2, x5 @@ -155,11 +155,9 @@ reset: eret 1: /* Bad stub call */ - ldr x0, =HVC_STUB_ERR + mov_q x0, HVC_STUB_ERR eret SYM_CODE_END(__kvm_handle_stub_hvc) - .ltorg - .popsection diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..eb2ad5753887 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -436,7 +436,7 @@ SYM_FUNC_START(__cpu_setup) * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * both user and kernel. */ - ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ + mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS tcr_clear_errata_bits x10, x9, x5 -- cgit v1.2.3 From 6cf9a2dce6bd10cf454cf6299c1c23182cb486e7 Mon Sep 17 00:00:00 2001 From: Remi Denis-Courmont Date: Thu, 12 Mar 2020 11:40:02 +0200 Subject: arm64: move kimage_vaddr to .rodata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This datum is not referenced from .idmap.text: it does not need to be mapped in idmap. Lets move it to .rodata as it is never written to after early boot of the primary CPU. (Maybe .data.ro_after_init would be cleaner though?) Signed-off-by: Rémi Denis-Courmont Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a06727354fad..1923802ea962 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -458,17 +458,19 @@ SYM_FUNC_START_LOCAL(__primary_switched) b start_kernel SYM_FUNC_END(__primary_switched) + .pushsection ".rodata", "a" +SYM_DATA_START(kimage_vaddr) + .quad _text - TEXT_OFFSET +SYM_DATA_END(kimage_vaddr) +EXPORT_SYMBOL(kimage_vaddr) + .popsection + /* * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ .section ".idmap.text","awx" -SYM_DATA_START(kimage_vaddr) - .quad _text - TEXT_OFFSET -SYM_DATA_END(kimage_vaddr) -EXPORT_SYMBOL(kimage_vaddr) - /* * If we're fortunate enough to boot at EL2, ensure that the world is * sane before dropping to EL1. 
-- cgit v1.2.3 From 7fec52bf8095a95d7f698e9a165d4cace514a204 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:42 +1100 Subject: arm64: Declare ACPI parking protocol CPU operation if needed It's obvious we needn't declare the corresponding CPU operation when CONFIG_ARM64_ACPI_PARKING_PROTOCOL is disabled, even though it doesn't cause any compile warnings. Signed-off-by: Gavin Shan Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu_ops.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index 7e07072757af..2082cfb1be86 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -15,7 +15,9 @@ #include extern const struct cpu_operations smp_spin_table_ops; +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL extern const struct cpu_operations acpi_parking_protocol_ops; +#endif extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; -- cgit v1.2.3 From 6885fb129be30c627eb2f5b1498dba498ff6c037 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:43 +1100 Subject: arm64: Rename cpu_read_ops() to init_cpu_ops() This renames cpu_read_ops() to init_cpu_ops() as the function is only called in the initialization phase. Also, we will introduce get_cpu_ops() in subsequent patches to retrieve the CPU operations for a given CPU index. The names cpu_read_ops() and get_cpu_ops() would be difficult to distinguish from one another. Signed-off-by: Gavin Shan Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu_ops.h | 6 +++--- arch/arm64/kernel/cpu_ops.c | 2 +- arch/arm64/kernel/setup.c | 2 +- arch/arm64/kernel/smp.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 86aabf1e0199..baa13b5db2ca 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -56,11 +56,11 @@ struct cpu_operations { }; extern const struct cpu_operations *cpu_ops[NR_CPUS]; -int __init cpu_read_ops(int cpu); +int __init init_cpu_ops(int cpu); -static inline void __init cpu_read_bootcpu_ops(void) +static inline void __init init_bootcpu_ops(void) { - cpu_read_ops(0); + init_cpu_ops(0); } #endif /* ifndef __ASM_CPU_OPS_H */ diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index 2082cfb1be86..a6c3c816b618 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -96,7 +96,7 @@ static const char *__init cpu_read_enable_method(int cpu) /* * Read a cpu's enable method and record it in cpu_ops.
*/ -int __init cpu_read_ops(int cpu) +int __init init_cpu_ops(int cpu) { const char *enable_method = cpu_read_enable_method(cpu); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index a34890bf309f..f66bd260cce8 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -344,7 +344,7 @@ void __init setup_arch(char **cmdline_p) else psci_acpi_init(); - cpu_read_bootcpu_ops(); + init_bootcpu_ops(); smp_init_cpus(); smp_build_mpidr_hash(); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..6f8477d7f3be 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -488,7 +488,7 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) */ static int __init smp_cpu_setup(int cpu) { - if (cpu_read_ops(cpu)) + if (init_cpu_ops(cpu)) return -ENODEV; if (cpu_ops[cpu]->cpu_init(cpu)) -- cgit v1.2.3 From de58ed5e16e62f36c7ed05552f18b7f9c647dcaf Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:44 +1100 Subject: arm64: Introduce get_cpu_ops() helper function This introduces get_cpu_ops() to return the CPU operations according to the given CPU index. For now, it simply returns the @cpu_ops[cpu] as before. Also, helper function __cpu_try_die() is introduced to be shared by cpu_die() and ipi_cpu_crash_stop(). So it shouldn't introduce any functional changes. Signed-off-by: Gavin Shan Signed-off-by: Catalin Marinas Acked-by: Mark Rutland --- arch/arm64/include/asm/cpu_ops.h | 2 +- arch/arm64/kernel/cpu_ops.c | 7 +++- arch/arm64/kernel/cpuidle.c | 9 +++--- arch/arm64/kernel/setup.c | 6 ++-- arch/arm64/kernel/smp.c | 70 ++++++++++++++++++++++++++-------------- 5 files changed, 62 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index baa13b5db2ca..d28e8f37d3b4 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -55,8 +55,8 @@ struct cpu_operations { #endif }; -extern const struct cpu_operations *cpu_ops[NR_CPUS]; int __init init_cpu_ops(int cpu); +extern const struct cpu_operations *get_cpu_ops(int cpu); static inline void __init init_bootcpu_ops(void) { diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index a6c3c816b618..e133011f64b5 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -20,7 +20,7 @@ extern const struct cpu_operations acpi_parking_protocol_ops; #endif extern const struct cpu_operations cpu_psci_ops; -const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; +static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = { &smp_spin_table_ops, @@ -111,3 +111,8 @@ int __init init_cpu_ops(int cpu) return 0; } + +const struct cpu_operations *get_cpu_ops(int cpu) +{ + return cpu_ops[cpu]; +} diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index e4d6af2fdec7..b512b5503f6e 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -18,11 +18,11 @@ int arm_cpuidle_init(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); int ret = -EOPNOTSUPP; - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend && - cpu_ops[cpu]->cpu_init_idle) - ret = cpu_ops[cpu]->cpu_init_idle(cpu); + if (ops && ops->cpu_suspend && ops->cpu_init_idle) + ret = ops->cpu_init_idle(cpu); return ret; } @@ -37,8 +37,9 @@ int arm_cpuidle_init(unsigned int cpu) int arm_cpuidle_suspend(int index) { int cpu = 
smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(cpu); - return cpu_ops[cpu]->cpu_suspend(index); + return ops->cpu_suspend(index); } #ifdef CONFIG_ACPI diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f66bd260cce8..3fd2c11c09fc 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -371,8 +371,10 @@ void __init setup_arch(char **cmdline_p) static inline bool cpu_can_disable(unsigned int cpu) { #ifdef CONFIG_HOTPLUG_CPU - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable) - return cpu_ops[cpu]->cpu_can_disable(cpu); + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_can_disable) + return ops->cpu_can_disable(cpu); #endif return false; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 6f8477d7f3be..e5c9862c271b 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -93,8 +93,10 @@ static inline int op_cpu_kill(unsigned int cpu) */ static int boot_secondary(unsigned int cpu, struct task_struct *idle) { - if (cpu_ops[cpu]->cpu_boot) - return cpu_ops[cpu]->cpu_boot(cpu); + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops->cpu_boot) + return ops->cpu_boot(cpu); return -EOPNOTSUPP; } @@ -196,6 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; struct mm_struct *mm = &init_mm; + const struct cpu_operations *ops; unsigned int cpu; cpu = task_cpu(current); @@ -227,8 +230,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ check_local_cpu_capabilities(); - if (cpu_ops[cpu]->cpu_postboot) - cpu_ops[cpu]->cpu_postboot(); + ops = get_cpu_ops(cpu); + if (ops->cpu_postboot) + ops->cpu_postboot(); /* * Log the CPU info before it is marked online and might get read. @@ -266,19 +270,21 @@ asmlinkage notrace void secondary_start_kernel(void) #ifdef CONFIG_HOTPLUG_CPU static int op_cpu_disable(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); + /* * If we don't have a cpu_die method, abort before we reach the point * of no return. CPU0 may not have an cpu_ops, so test for it. */ - if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die) + if (!ops || !ops->cpu_die) return -EOPNOTSUPP; /* * We may need to abort a hot unplug for some other mechanism-specific * reason. */ - if (cpu_ops[cpu]->cpu_disable) - return cpu_ops[cpu]->cpu_disable(cpu); + if (ops->cpu_disable) + return ops->cpu_disable(cpu); return 0; } @@ -314,15 +320,17 @@ int __cpu_disable(void) static int op_cpu_kill(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); + /* * If we have no means of synchronising with the dying CPU, then assume * that it is really dead. We can only wait for an arbitrary length of * time and hope that it's dead, so let's skip the wait and just hope. */ - if (!cpu_ops[cpu]->cpu_kill) + if (!ops->cpu_kill) return 0; - return cpu_ops[cpu]->cpu_kill(cpu); + return ops->cpu_kill(cpu); } /* @@ -357,6 +365,7 @@ void __cpu_die(unsigned int cpu) void cpu_die(void) { unsigned int cpu = smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(cpu); idle_task_exit(); @@ -370,12 +379,22 @@ void cpu_die(void) * mechanism must perform all required cache maintenance to ensure that * no dirty lines are lost in the process of shutting down the CPU. 
*/ - cpu_ops[cpu]->cpu_die(cpu); + ops->cpu_die(cpu); BUG(); } #endif +static void __cpu_try_die(int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_die) + ops->cpu_die(cpu); +#endif +} + /* * Kill the calling secondary CPU, early in bringup before it is turned * online. @@ -389,12 +408,11 @@ void cpu_die_early(void) /* Mark this CPU absent */ set_cpu_present(cpu, 0); -#ifdef CONFIG_HOTPLUG_CPU - update_cpu_boot_status(CPU_KILL_ME); - /* Check if we can park ourselves */ - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die) - cpu_ops[cpu]->cpu_die(cpu); -#endif + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + update_cpu_boot_status(CPU_KILL_ME); + __cpu_try_die(cpu); + } + update_cpu_boot_status(CPU_STUCK_IN_KERNEL); cpu_park_loop(); @@ -488,10 +506,13 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) */ static int __init smp_cpu_setup(int cpu) { + const struct cpu_operations *ops; + if (init_cpu_ops(cpu)) return -ENODEV; - if (cpu_ops[cpu]->cpu_init(cpu)) + ops = get_cpu_ops(cpu); + if (ops->cpu_init(cpu)) return -ENODEV; set_cpu_possible(cpu, true); @@ -714,6 +735,7 @@ void __init smp_init_cpus(void) void __init smp_prepare_cpus(unsigned int max_cpus) { + const struct cpu_operations *ops; int err; unsigned int cpu; unsigned int this_cpu; @@ -744,10 +766,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (cpu == smp_processor_id()) continue; - if (!cpu_ops[cpu]) + ops = get_cpu_ops(cpu); + if (!ops) continue; - err = cpu_ops[cpu]->cpu_prepare(cpu); + err = ops->cpu_prepare(cpu); if (err) continue; @@ -863,10 +886,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) local_irq_disable(); sdei_mask_local_cpu(); -#ifdef CONFIG_HOTPLUG_CPU - if (cpu_ops[cpu]->cpu_die) - cpu_ops[cpu]->cpu_die(cpu); -#endif + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) + __cpu_try_die(cpu); /* just in case */ cpu_park_loop(); @@ -1044,8 +1065,9 @@ static bool have_cpu_die(void) { #ifdef CONFIG_HOTPLUG_CPU int any_cpu = raw_smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(any_cpu); - if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) + if (ops && ops->cpu_die) return true; #endif return false; -- cgit v1.2.3 From d4abd29d6775d4f3d0f01ca9a6de2d66dac74764 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 23 Mar 2020 12:33:36 +0000 Subject: arm64: head: Convert install_el2_stub to SYM_INNER_LABEL New assembly annotations have recently been introduced which aim to make the way we describe symbols in assembly more consistent. Recently the arm64 assembly code was converted to use these, but install_el2_stub was missed. Signed-off-by: Mark Brown [catalin.marinas@arm.com: changed to SYM_L_LOCAL] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a06727354fad..cd7dcbe4bcb6 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -600,7 +600,7 @@ set_hcr: isb ret -install_el2_stub: +SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) /* * When VHE is not in use, early init of EL2 and EL1 needs to be * done here. -- cgit v1.2.3