author    | Marc Zyngier <marc.zyngier@arm.com> | 2015-01-29 16:52:12 +0300
committer | Marc Zyngier <marc.zyngier@arm.com> | 2016-02-29 21:34:16 +0300
commit    | b81125c791a2958cc60ae968fc1cdea82b7cd3b0
tree      | dd3fe9564cf60c1417bb65390646a0f4ce2ecbba /arch
parent    | 1e947bad0b63b351cbdd9ad55ea5bf7e31c76036
arm64: KVM: VHE: Patch out use of HVC
With VHE, the host never issues an HVC instruction to get into the
KVM code, as we can simply branch there.
Use runtime code patching to simplify things a bit.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
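
The point of the patch is that the VHE/non-VHE decision is made once, when the CPU capability is detected, by rewriting the instructions in kvm_call_hyp, instead of being re-tested on every hypercall. As a rough user-space analogy (an assumption for illustration, not the kernel's alternatives implementation; every name below is invented), binding a function pointer once at init time gives a similar effect in portable C:

```c
#include <stdio.h>

/*
 * Rough analogy only: the kernel's alternatives framework rewrites the
 * instructions in place once ARM64_HAS_VIRT_HOST_EXTN is detected; a
 * function pointer bound once at init time is the closest portable
 * equivalent. All names here are invented for illustration.
 */
typedef unsigned long (*hyp_fn_t)(unsigned long);
typedef unsigned long (*hyp_call_t)(hyp_fn_t, unsigned long);

static unsigned long call_via_hvc(hyp_fn_t fn, unsigned long arg)
{
	/* Non-VHE path: would issue "hvc #0" and let the EL2 vector
	 * shuffle the arguments and branch to fn. */
	return fn(arg);
}

static unsigned long call_direct(hyp_fn_t fn, unsigned long arg)
{
	/* VHE path: the host already runs at EL2, so just branch. */
	return fn(arg);
}

static hyp_call_t hyp_call;	/* "patched" exactly once */

static void init_hyp_call(int has_vhe)
{
	hyp_call = has_vhe ? call_direct : call_via_hvc;
}

static unsigned long double_it(unsigned long x) { return 2 * x; }

int main(void)
{
	init_hyp_call(1);				/* pretend VHE was detected */
	printf("%lu\n", hyp_call(double_it, 21));	/* prints 42 */
	return 0;
}
```

The real mechanism avoids even the indirect call: as the diff below shows, the hvc/ret pair is overwritten with a plain branch to __vhe_hyp_call plus a nop, so the patched fast path carries no per-call check at all.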
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/kvm/hyp.S           | 7
-rw-r--r-- | arch/arm64/kvm/hyp/hyp-entry.S | 40
2 files changed, 38 insertions, 9 deletions
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 0ccdcbbef3c2..0689a74e6ba0 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,7 +17,9 @@
 
 #include <linux/linkage.h>
 
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
 
 /*
  * u64 kvm_call_hyp(void *hypfn, ...);
@@ -38,6 +40,11 @@
  * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	hvc	#0
 	ret
+alternative_else
+	b	__vhe_hyp_call
+	nop
+alternative_endif
 ENDPROC(kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 93e8d983c0bd..1bdeee70833e 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -38,6 +38,34 @@
 	ldp	x0, x1, [sp], #16
 .endm
 
+.macro do_el2_call
+	/*
+	 * Shuffle the parameters before calling the function
+	 * pointed to in x0. Assumes parameters in x[1,2,3].
+	 */
+	sub	sp, sp, #16
+	str	lr, [sp]
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+	ldr	lr, [sp]
+	add	sp, sp, #16
+.endm
+
+ENTRY(__vhe_hyp_call)
+	do_el2_call
+	/*
+	 * We used to rely on having an exception return to get
+	 * an implicit isb. In the E2H case, we don't have it anymore.
+	 * rather than changing all the leaf functions, just do it here
+	 * before returning to the rest of the kernel.
+	 */
+	isb
+	ret
+ENDPROC(__vhe_hyp_call)
+
 el1_sync:				// Guest trapped into EL2
 	save_x0_to_x3
 
@@ -58,19 +86,13 @@ el1_sync:				// Guest trapped into EL2
 	mrs	x0, vbar_el2
 	b	2f
 
-1:	stp	lr, xzr, [sp, #-16]!
-
+1:
 	/*
-	 * Compute the function address in EL2, and shuffle the parameters.
+	 * Perform the EL2 call
 	 */
 	kern_hyp_va	x0
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
+	do_el2_call
 
-	ldp	lr, xzr, [sp], #16
 2:	eret
 
 el1_trap:
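
The do_el2_call macro factors out the argument shuffle so it can be shared by the existing el1_sync HVC path and the new __vhe_hyp_call entry point. kvm_call_hyp() arrives with the target function in x0 and up to three arguments in x1..x3; before branching, everything slides down one register so the callee sees the arguments as its own first three parameters, with lr preserved around the call. A minimal C rendering of that shuffle (illustrative only; the authoritative version is the assembly above, and the names below are invented):

```c
/*
 * Illustrative sketch of the do_el2_call shuffle, not kernel code.
 * kvm_call_hyp(fn, a, b, c) arrives with fn in x0 and a/b/c in x1..x3;
 * the callee expects its arguments in x0..x2, so each value moves down
 * one register before the branch, and lr is saved across the call.
 */
typedef unsigned long (*el2_fn_t)(unsigned long, unsigned long, unsigned long);

unsigned long el2_call_sketch(el2_fn_t fn,		/* was in x0 */
			      unsigned long a,		/* was in x1 -> becomes x0 */
			      unsigned long b,		/* was in x2 -> becomes x1 */
			      unsigned long c)		/* was in x3 -> becomes x2 */
{
	/* mov lr, x0; mov x0, x1; mov x1, x2; mov x2, x3; blr lr */
	return fn(a, b, c);
}
```

On the VHE path the implicit instruction synchronization that previously came from the exception return is gone, which is why __vhe_hyp_call ends with an explicit isb before returning, as the comment in the patch notes.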