author: Marc Zyngier <marc.zyngier@arm.com> (2018-04-10 13:36:45 +0300)
committer: Will Deacon <will.deacon@arm.com> (2018-04-11 20:49:30 +0300)
commit: e8b22d0f4500c7bb6aab879d4e32b2a00c89d5f8
tree: 1aef9db8bb544eb9cb546201a555d60d9127d975 /arch/arm64/kvm
parent: 22765f30dbaf1118c6ff0fcb8b99c9f2b4d396d5
download: linux-e8b22d0f4500c7bb6aab879d4e32b2a00c89d5f8.tar.xz
arm64: Move the content of bpi.S to hyp-entry.S
bpi.S was introduced as we were starting to build the Spectre v2
mitigation framework, and it was rather unclear that it would
become strictly KVM specific.
Now that the picture is a lot clearer, let's move the content
of that file to hyp-entry.S, where it actually belongs.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r-- | arch/arm64/kvm/hyp/hyp-entry.S | 64
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 87dfecce82b1..bffece27b5c1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 - ARM Ltd
+ * Copyright (C) 2015-2018 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/mmu.h>
 
 	.text
 	.pushsection	.hyp.text, "ax"
@@ -237,3 +238,64 @@ ENTRY(__kvm_hyp_vector)
 	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
 	valid_vect	el1_error		// Error 32-bit EL1
 ENDPROC(__kvm_hyp_vector)
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+.macro hyp_ventry
+	.align 7
+1:	.rept 27
+	nop
+	.endr
+/*
+ * The default sequence is to directly branch to the KVM vectors,
+ * using the computed offset. This applies for VHE as well as
+ * !ARM64_HARDEN_EL2_VECTORS.
+ *
+ * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
+ * with:
+ *
+ * stp	x0, x1, [sp, #-16]!
+ * movz	x0, #(addr & 0xffff)
+ * movk	x0, #((addr >> 16) & 0xffff), lsl #16
+ * movk	x0, #((addr >> 32) & 0xffff), lsl #32
+ * br	x0
+ *
+ * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * See kvm_patch_vector_branch for details.
+ */
+alternative_cb	kvm_patch_vector_branch
+	b	__kvm_hyp_vector + (1b - 0b)
+	nop
+	nop
+	nop
+	nop
+alternative_cb_end
+.endm
+
+.macro generate_vectors
+0:
+	.rept 16
+	hyp_ventry
+	.endr
+	.org 0b + SZ_2K		// Safety measure
+.endm
+
+	.align	11
+ENTRY(__bp_harden_hyp_vecs_start)
+	.rept BP_HARDEN_EL2_SLOTS
+	generate_vectors
+	.endr
+ENTRY(__bp_harden_hyp_vecs_end)
+
+	.popsection
+
+ENTRY(__smccc_workaround_1_smc_start)
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	smc	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	ldp	x0, x1, [sp, #(8 * 2)]
+	add	sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_1_smc_end)
+#endif
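To make the geometry of the new hunk concrete, here is a small standalone C sketch. It is illustrative only, not kernel code: the counts and shift amounts come straight from the diff above, while the program itself (names, the example address, the output) is made up for this note. It checks that one hyp_ventry is exactly 128 bytes, that one generated slot is exactly SZ_2K, and that the movz/movk immediates described in the comment block really reassemble the target address.

```c
/* Illustrative sketch only -- not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INSN_BYTES  4    /* every AArch64 instruction is 4 bytes */
#define NOP_COUNT   27   /* the ".rept 27 / nop / .endr" padding */
#define PATCH_INSNS 5    /* b + 4 nops, or stp/movz/movk/movk/br once patched */
#define VECTORS     16   /* ".rept 16 / hyp_ventry / .endr" */
#define SZ_2K       0x800

int main(void)
{
	/* One hyp_ventry: (27 + 5) * 4 = 128 bytes, matching ".align 7". */
	unsigned entry_bytes = (NOP_COUNT + PATCH_INSNS) * INSN_BYTES;
	assert(entry_bytes == 128);

	/* One slot: 16 entries * 128 bytes = 2K, matching the
	 * ".org 0b + SZ_2K" safety check in generate_vectors. */
	assert(VECTORS * entry_bytes == SZ_2K);

	/* Split a made-up 48-bit hyp VA into the movz/movk immediates
	 * shown in the comment, then reassemble it. */
	uint64_t addr = 0x0000aaaadeadbe04ULL;  /* hypothetical example */
	uint16_t imm0 = addr & 0xffff;          /* movz ...          */
	uint16_t imm1 = (addr >> 16) & 0xffff;  /* movk ..., lsl #16 */
	uint16_t imm2 = (addr >> 32) & 0xffff;  /* movk ..., lsl #32 */
	uint64_t rebuilt = (uint64_t)imm0 |
			   ((uint64_t)imm1 << 16) |
			   ((uint64_t)imm2 << 32);
	assert(rebuilt == addr);

	printf("entry = %u bytes, slot = %u bytes, address reassembles\n",
	       entry_bytes, VECTORS * entry_bytes);
	return 0;
}
```

The 27 nops are pure padding: together with the five patchable instructions they round each hyp_ventry up to the 128 bytes that one EL2 vector entry occupies, so the 16 entries emitted by generate_vectors fill exactly one 2K vector slot, and BP_HARDEN_EL2_SLOTS such slots make up the __bp_harden_hyp_vecs_start..__bp_harden_hyp_vecs_end region.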