diff options
author:    Marc Zyngier <maz@kernel.org>  2022-07-27 17:29:02 +0300
committer: Marc Zyngier <maz@kernel.org>  2022-07-27 20:18:03 +0300
commit:    9f5fee05f6897d0fe0e3a44ade71bb85cd97b2ef (patch)
tree:      c942f5d53ed8f342bdc8ddba823fe0ff60da74de /arch/arm64/kvm
parent:    03fe9cd05b9f38353208c23bd791dac47c912054 (diff)
download:  linux-9f5fee05f6897d0fe0e3a44ade71bb85cd97b2ef.tar.xz
KVM: arm64: Move nVHE stacktrace unwinding into its own compilation unit
The unwinding code doesn't really belong to the exit handling
code. Instead, move it to a file (conveniently named stacktrace.c
to confuse the reviewer), and move all the stacktrace-related
stuff there.
It will be joined by more code very soon.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Tested-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20220727142906.1856759-3-maz@kernel.org
Diffstat (limited to 'arch/arm64/kvm'):

 arch/arm64/kvm/Makefile      |   2 +-
 arch/arm64/kvm/handle_exit.c |  98 -------------------
 arch/arm64/kvm/stacktrace.c  | 120 ++++++++++++++++++++++
 3 files changed, 121 insertions(+), 99 deletions(-)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index aa127ae9f675..5e33c2d4645a 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_KVM) += hyp/ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ inject_fault.o va_layout.o handle_exit.o \ - guest.o debug.o reset.o sys_regs.o \ + guest.o debug.o reset.o sys_regs.o stacktrace.o \ vgic-sys-reg-v3.o fpsimd.o pkvm.o \ arch_timer.o trng.o vmid.o \ vgic/vgic.o vgic/vgic-init.o \ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index c14fc4ba4422..ef8b57953aa2 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -319,104 +319,6 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index) kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu)); } -/* - * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry - * - * @arg : the hypervisor offset, used for address translation - * @where : the program counter corresponding to the stack frame - */ -static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where) -{ - unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0); - unsigned long hyp_offset = (unsigned long)arg; - - /* Mask tags and convert to kern addr */ - where = (where & va_mask) + hyp_offset; - kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset())); - - return true; -} - -static inline void kvm_nvhe_dump_backtrace_start(void) -{ - kvm_err("nVHE call trace:\n"); -} - -static inline void kvm_nvhe_dump_backtrace_end(void) -{ - kvm_err("---[ end nVHE call trace ]---\n"); -} - -/* - * hyp_dump_backtrace - Dump the non-protected nVHE backtrace. - * - * @hyp_offset: hypervisor offset, used for address translation. - * - * The host can directly access HYP stack pages in non-protected - * mode, so the unwinding is done directly from EL1. This removes - * the need for shared buffers between host and hypervisor for - * the stacktrace. 
- */ -static void hyp_dump_backtrace(unsigned long hyp_offset) -{ - struct kvm_nvhe_stacktrace_info *stacktrace_info; - struct unwind_state state; - - stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - - kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc); - - kvm_nvhe_dump_backtrace_start(); - unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset); - kvm_nvhe_dump_backtrace_end(); -} - -#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE -DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], - pkvm_stacktrace); - -/* - * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace. - * - * @hyp_offset: hypervisor offset, used for address translation. - * - * Dumping of the pKVM HYP backtrace is done by reading the - * stack addresses from the shared stacktrace buffer, since the - * host cannot directly access hypervisor memory in protected - * mode. - */ -static void pkvm_dump_backtrace(unsigned long hyp_offset) -{ - unsigned long *stacktrace - = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace); - int i, size = NVHE_STACKTRACE_SIZE / sizeof(long); - - kvm_nvhe_dump_backtrace_start(); - /* The saved stacktrace is terminated by a null entry */ - for (i = 0; i < size && stacktrace[i]; i++) - kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]); - kvm_nvhe_dump_backtrace_end(); -} -#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */ -static void pkvm_dump_backtrace(unsigned long hyp_offset) -{ - kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n"); -} -#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */ - -/* - * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace. - * - * @hyp_offset: hypervisor offset, used for address translation. 
- */ -static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset) -{ - if (is_protected_kvm_enabled()) - pkvm_dump_backtrace(hyp_offset); - else - hyp_dump_backtrace(hyp_offset); -} - void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt, u64 elr_phys, u64 par, uintptr_t vcpu, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c new file mode 100644 index 000000000000..9812aefdcfb4 --- /dev/null +++ b/arch/arm64/kvm/stacktrace.c @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * KVM nVHE hypervisor stack tracing support. + * + * The unwinder implementation depends on the nVHE mode: + * + * 1) Non-protected nVHE mode - the host can directly access the + * HYP stack pages and unwind the HYP stack in EL1. This saves having + * to allocate shared buffers for the host to read the unwinded + * stacktrace. + * + * 2) pKVM (protected nVHE) mode - the host cannot directly access + * the HYP memory. The stack is unwinded in EL2 and dumped to a shared + * buffer where the host can read and print the stacktrace. 
+ * + * Copyright (C) 2022 Google LLC + */ + +#include <linux/kvm.h> +#include <linux/kvm_host.h> + +#include <asm/stacktrace/nvhe.h> + +/* + * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry + * + * @arg : the hypervisor offset, used for address translation + * @where : the program counter corresponding to the stack frame + */ +static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where) +{ + unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0); + unsigned long hyp_offset = (unsigned long)arg; + + /* Mask tags and convert to kern addr */ + where = (where & va_mask) + hyp_offset; + kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset())); + + return true; +} + +static void kvm_nvhe_dump_backtrace_start(void) +{ + kvm_err("nVHE call trace:\n"); +} + +static void kvm_nvhe_dump_backtrace_end(void) +{ + kvm_err("---[ end nVHE call trace ]---\n"); +} + +/* + * hyp_dump_backtrace - Dump the non-protected nVHE backtrace. + * + * @hyp_offset: hypervisor offset, used for address translation. + * + * The host can directly access HYP stack pages in non-protected + * mode, so the unwinding is done directly from EL1. This removes + * the need for shared buffers between host and hypervisor for + * the stacktrace. + */ +static void hyp_dump_backtrace(unsigned long hyp_offset) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info; + struct unwind_state state; + + stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + + kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc); + + kvm_nvhe_dump_backtrace_start(); + unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset); + kvm_nvhe_dump_backtrace_end(); +} + +#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE +DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], + pkvm_stacktrace); + +/* + * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace. + * + * @hyp_offset: hypervisor offset, used for address translation. 
+ * + * Dumping of the pKVM HYP backtrace is done by reading the + * stack addresses from the shared stacktrace buffer, since the + * host cannot directly access hypervisor memory in protected + * mode. + */ +static void pkvm_dump_backtrace(unsigned long hyp_offset) +{ + unsigned long *stacktrace + = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace); + int i, size = NVHE_STACKTRACE_SIZE / sizeof(long); + + kvm_nvhe_dump_backtrace_start(); + /* The saved stacktrace is terminated by a null entry */ + for (i = 0; i < size && stacktrace[i]; i++) + kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]); + kvm_nvhe_dump_backtrace_end(); +} +#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */ +static void pkvm_dump_backtrace(unsigned long hyp_offset) +{ + kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n"); +} +#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */ + +/* + * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace. + * + * @hyp_offset: hypervisor offset, used for address translation. + */ +void kvm_nvhe_dump_backtrace(unsigned long hyp_offset) +{ + if (is_protected_kvm_enabled()) + pkvm_dump_backtrace(hyp_offset); + else + hyp_dump_backtrace(hyp_offset); +} |