author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-16 19:55:35 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-16 19:55:35 +0300
commit		10dc3747661bea9215417b659449bb7b8ed3df2c (patch)
tree		d943974b4941203a7db2fabe4896852cf0f16bc4 /arch/arm
parent		047486d8e7c2a7e8d75b068b69cb67b47364f5d4 (diff)
parent		f958ee745f70b60d0e41927cab2c073104bc70c2 (diff)
download	linux-10dc3747661bea9215417b659449bb7b8ed3df2c.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"One of the largest releases for KVM... Hardly any generic
changes, but lots of architecture-specific updates.
ARM:
- VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
- PMU support for guests
- 32bit world switch rewritten in C
- various optimizations to the vgic save/restore code.
PPC:
- enabled KVM-VFIO integration ("VFIO device")
- optimizations to speed up IPIs between vcpus
- in-kernel handling of IOMMU hypercalls
- support for dynamic DMA windows (DDW).
s390:
- provide the floating point registers via sync regs;
- separated instruction vs. data accesses
- dirty log improvements for huge guests
- bugfixes and documentation improvements.
x86:
- Hyper-V VMBus hypercall userspace exit
- alternative implementation of lowest-priority interrupts using
vector hashing (for better VT-d posted interrupt support)
- fixed guest debugging with nested virtualizations
- improved interrupt tracking in the in-kernel IOAPIC
- generic infrastructure for tracking writes to guest
 memory - currently its only use is to speed up the legacy shadow
paging (pre-EPT) case, but in the future it will be used for
virtual GPUs as well
- much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
KVM: x86: disable MPX if host did not enable MPX XSAVE features
arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
arm64: KVM: vgic-v3: Reset LRs at boot time
arm64: KVM: vgic-v3: Do not save an LR known to be empty
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
arm64: KVM: vgic-v3: Avoid accessing ICH registers
KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
KVM: arm/arm64: vgic-v2: Reset LRs at boot time
KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: s390: allocate only one DMA page per VM
KVM: s390: enable STFLE interpretation only if enabled for the guest
KVM: s390: wake up when the VCPU cpu timer expires
KVM: s390: step the VCPU timer while in enabled wait
KVM: s390: protect VCPU cpu timer with a seqcount
KVM: s390: step VCPU cpu timer during kvm_run ioctl
...
Diffstat (limited to 'arch/arm')
29 files changed, 1405 insertions, 1385 deletions
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b610ff..15d58b42d5a1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -19,38 +19,7 @@
 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__
 
-/* 0 is reserved as an invalid value. */
-#define c0_MPIDR	1	/* MultiProcessor ID Register */
-#define c0_CSSELR	2	/* Cache Size Selection Register */
-#define c1_SCTLR	3	/* System Control Register */
-#define c1_ACTLR	4	/* Auxiliary Control Register */
-#define c1_CPACR	5	/* Coprocessor Access Control */
-#define c2_TTBR0	6	/* Translation Table Base Register 0 */
-#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
-#define c2_TTBR1	8	/* Translation Table Base Register 1 */
-#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
-#define c2_TTBCR	10	/* Translation Table Base Control R. */
-#define c3_DACR		11	/* Domain Access Control Register */
-#define c5_DFSR		12	/* Data Fault Status Register */
-#define c5_IFSR		13	/* Instruction Fault Status Register */
-#define c5_ADFSR	14	/* Auxilary Data Fault Status R */
-#define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
-#define c6_DFAR		16	/* Data Fault Address Register */
-#define c6_IFAR		17	/* Instruction Fault Address Register */
-#define c7_PAR		18	/* Physical Address Register */
-#define c7_PAR_high	19	/* PAR top 32 bits */
-#define c9_L2CTLR	20	/* Cortex A15/A7 L2 Control Register */
-#define c10_PRRR	21	/* Primary Region Remap Register */
-#define c10_NMRR	22	/* Normal Memory Remap Register */
-#define c12_VBAR	23	/* Vector Base Address Register */
-#define c13_CID		24	/* Context ID Register */
-#define c13_TID_URW	25	/* Thread ID, User R/W */
-#define c13_TID_URO	26	/* Thread ID, User R/O */
-#define c13_TID_PRIV	27	/* Thread ID, Privileged */
-#define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
-#define c10_AMAIR0	29	/* Auxilary Memory Attribute Indirection Reg0 */
-#define c10_AMAIR1	30	/* Auxilary Memory Attribute Indirection Reg1 */
-#define NR_CP15_REGS	31	/* Number of regs (incl. invalid) */
+#include <asm/virt.h>
 
 #define ARM_EXCEPTION_RESET	0
 #define ARM_EXCEPTION_UNDEFINED	1
@@ -86,19 +55,15 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_exit[];
-extern char __kvm_hyp_exit_end[];
-
 extern char __kvm_hyp_vector[];
 
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern void __init_stage2_translation(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3095df091ff8..ee5328fc4b06 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -68,12 +68,12 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 
 static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
 static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -83,13 +83,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 
 static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
 }
 
 static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return cpsr_mode > USR_MODE;;
 }
 
@@ -108,11 +108,6 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
 	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
 }
 
-static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.fault.hyp_pc;
-}
-
 static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
@@ -143,6 +138,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
 }
 
+static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
+}
+
 /* Get Access Size from a data abort */
 static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 {
@@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
+	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
 }
 
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
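Routing every register access through these inline accessors is what lets the rest of the series move the backing storage into arch.ctxt without touching callers. As a rough illustration of the calling pattern (a sketch modelled on kvm_skip_instr(), not code from this series):

static void skip_guest_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	/* Narrow Thumb encodings advance the PC by 2, everything else by 4. */
	if ((*vcpu_cpsr(vcpu) & PSR_T_BIT) && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
}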
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f9f27792d8ed..385070180c25 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -85,20 +85,61 @@ struct kvm_vcpu_fault_info {
 	u32 hsr;		/* Hyp Syndrome Register */
 	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
 	u32 hpfar;		/* Hyp IPA Fault Address Register */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
 };
 
-typedef struct vfp_hard_struct kvm_cpu_context_t;
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+	__INVALID_SYSREG__,
+	c0_MPIDR,		/* MultiProcessor ID Register */
+	c0_CSSELR,		/* Cache Size Selection Register */
+	c1_SCTLR,		/* System Control Register */
+	c1_ACTLR,		/* Auxiliary Control Register */
+	c1_CPACR,		/* Coprocessor Access Control */
+	c2_TTBR0,		/* Translation Table Base Register 0 */
+	c2_TTBR0_high,		/* TTBR0 top 32 bits */
+	c2_TTBR1,		/* Translation Table Base Register 1 */
+	c2_TTBR1_high,		/* TTBR1 top 32 bits */
+	c2_TTBCR,		/* Translation Table Base Control R. */
+	c3_DACR,		/* Domain Access Control Register */
+	c5_DFSR,		/* Data Fault Status Register */
+	c5_IFSR,		/* Instruction Fault Status Register */
+	c5_ADFSR,		/* Auxilary Data Fault Status R */
+	c5_AIFSR,		/* Auxilary Instrunction Fault Status R */
+	c6_DFAR,		/* Data Fault Address Register */
+	c6_IFAR,		/* Instruction Fault Address Register */
+	c7_PAR,			/* Physical Address Register */
+	c7_PAR_high,		/* PAR top 32 bits */
+	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
+	c10_PRRR,		/* Primary Region Remap Register */
+	c10_NMRR,		/* Normal Memory Remap Register */
+	c12_VBAR,		/* Vector Base Address Register */
+	c13_CID,		/* Context ID Register */
+	c13_TID_URW,		/* Thread ID, User R/W */
+	c13_TID_URO,		/* Thread ID, User R/O */
+	c13_TID_PRIV,		/* Thread ID, Privileged */
+	c14_CNTKCTL,		/* Timer Control Register (PL1) */
+	c10_AMAIR0,		/* Auxilary Memory Attribute Indirection Reg0 */
+	c10_AMAIR1,		/* Auxilary Memory Attribute Indirection Reg1 */
+	NR_CP15_REGS		/* Number of regs (incl. invalid) */
+};
+
+struct kvm_cpu_context {
+	struct kvm_regs	gp_regs;
+	struct vfp_hard_struct vfp;
+	u32 cp15[NR_CP15_REGS];
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
 
 struct kvm_vcpu_arch {
-	struct kvm_regs regs;
+	struct kvm_cpu_context ctxt;
 
 	int target; /* Processor target */
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* System control coprocessor (cp15) */
-	u32 cp15[NR_CP15_REGS];
-
 	/* The CPU type we expose to the VM */
 	u32 midr;
 
@@ -111,9 +152,6 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-
 	/* Host FP context */
 	kvm_cpu_context_t *host_cpu_context;
 
@@ -158,12 +196,14 @@ struct kvm_vcpu_stat {
 	u64 exits;
 };
 
+#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
+
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-u64 kvm_call_hyp(void *hypfn, ...);
+unsigned long kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -220,6 +260,11 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }
 
+static inline void __cpu_init_stage2(void)
+{
+	kvm_call_hyp(__init_stage2_translation);
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	return 0;
@@ -242,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
 
 #endif /* __ARM_KVM_HOST_H__ */
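With guest and per-CPU host state now sharing the kvm_cpu_context shape, one save/restore routine can serve either side of a world switch. Note also that a 64-bit cp15 register occupies two consecutive 32-bit slots, low word first, which is why each _high enumerator sits directly after its low half. A hypothetical helper (not in the patch) reading TTBR0 as one value:

static inline u64 vcpu_ttbr0(struct kvm_vcpu *vcpu)
{
	/* Mirrors vcpu_cp15_reg64_get() in coproc.c: low word at the base index. */
	return (u64)vcpu_cp15(vcpu, c2_TTBR0) |
	       ((u64)vcpu_cp15(vcpu, c2_TTBR0_high) << 32);
}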
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
new file mode 100644
index 000000000000..f0e860761380
--- /dev/null
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_HYP_H__
+#define __ARM_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/vfp.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (v)
+#define hyp_kern_va(v) (v)
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
+	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm)		\
+	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+#define __ACCESS_VFP(CRn)			\
+	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
+
+#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
+
+#define __read_sysreg(r, w, c, t) ({				\
+	t __val;						\
+	asm volatile(r " " c : "=r" (__val));			\
+	__val;							\
+})
+#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
+
+#define write_special(v, r)					\
+	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
+#define read_special(r) ({					\
+	u32 __val;						\
+	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	__val;							\
+})
+
+#define TTBR0		__ACCESS_CP15_64(0, c2)
+#define TTBR1		__ACCESS_CP15_64(1, c2)
+#define VTTBR		__ACCESS_CP15_64(6, c2)
+#define PAR		__ACCESS_CP15_64(0, c7)
+#define CNTV_CVAL	__ACCESS_CP15_64(3, c14)
+#define CNTVOFF		__ACCESS_CP15_64(4, c14)
+
+#define MIDR		__ACCESS_CP15(c0, 0, c0, 0)
+#define CSSELR		__ACCESS_CP15(c0, 2, c0, 0)
+#define VPIDR		__ACCESS_CP15(c0, 4, c0, 0)
+#define VMPIDR		__ACCESS_CP15(c0, 4, c0, 5)
+#define SCTLR		__ACCESS_CP15(c1, 0, c0, 0)
+#define CPACR		__ACCESS_CP15(c1, 0, c0, 2)
+#define HCR		__ACCESS_CP15(c1, 4, c1, 0)
+#define HDCR		__ACCESS_CP15(c1, 4, c1, 1)
+#define HCPTR		__ACCESS_CP15(c1, 4, c1, 2)
+#define HSTR		__ACCESS_CP15(c1, 4, c1, 3)
+#define TTBCR		__ACCESS_CP15(c2, 0, c0, 2)
+#define HTCR		__ACCESS_CP15(c2, 4, c0, 2)
+#define VTCR		__ACCESS_CP15(c2, 4, c1, 2)
+#define DACR		__ACCESS_CP15(c3, 0, c0, 0)
+#define DFSR		__ACCESS_CP15(c5, 0, c0, 0)
+#define IFSR		__ACCESS_CP15(c5, 0, c0, 1)
+#define ADFSR		__ACCESS_CP15(c5, 0, c1, 0)
+#define AIFSR		__ACCESS_CP15(c5, 0, c1, 1)
+#define HSR		__ACCESS_CP15(c5, 4, c2, 0)
+#define DFAR		__ACCESS_CP15(c6, 0, c0, 0)
+#define IFAR		__ACCESS_CP15(c6, 0, c0, 2)
+#define HDFAR		__ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR		__ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR		__ACCESS_CP15(c6, 4, c0, 4)
+#define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
+#define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
+#define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
+#define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
+#define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
+#define AMAIR0		__ACCESS_CP15(c10, 0, c3, 0)
+#define AMAIR1		__ACCESS_CP15(c10, 0, c3, 1)
+#define VBAR		__ACCESS_CP15(c12, 0, c0, 0)
+#define CID		__ACCESS_CP15(c13, 0, c0, 1)
+#define TID_URW		__ACCESS_CP15(c13, 0, c0, 2)
+#define TID_URO		__ACCESS_CP15(c13, 0, c0, 3)
+#define TID_PRIV	__ACCESS_CP15(c13, 0, c0, 4)
+#define HTPIDR		__ACCESS_CP15(c13, 4, c0, 2)
+#define CNTKCTL		__ACCESS_CP15(c14, 0, c1, 0)
+#define CNTV_CTL	__ACCESS_CP15(c14, 0, c3, 1)
+#define CNTHCTL		__ACCESS_CP15(c14, 4, c1, 0)
+
+#define VFP_FPEXC	__ACCESS_VFP(FPEXC)
+
+/* AArch64 compatibility macros, only for the timer so far */
+#define read_sysreg_el0(r)	read_sysreg(r##_el0)
+#define write_sysreg_el0(v, r)	write_sysreg(v, r##_el0)
+
+#define cntv_ctl_el0	CNTV_CTL
+#define cntv_cval_el0	CNTV_CVAL
+#define cntvoff_el2	CNTVOFF
+#define cnthctl_el2	CNTHCTL
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+
+void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
+void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
+static inline bool __vfp_enabled(void)
+{
+	return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
+}
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
+
+int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
+			     struct kvm_cpu_context *host);
+int asmlinkage __hyp_do_panic(const char *, int, u32);
+
+#endif /* __ARM_KVM_HYP_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a520b7987a29..da44be9db4fa 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -179,7 +179,7 @@ struct kvm;
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
 }
 
 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 4371f45c5784..d4ceaf5f299b 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void)
 {
 	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
 }
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
 #endif
 
 #endif /* __ASSEMBLY__ */
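To see what the kvm_hyp.h accessors compile down to, expand read_sysreg(SCTLR) by hand: the SCTLR define supplies the ("mrc", "mcr", encoding, u32) tuple to __read_sysreg()/__write_sysreg(). Roughly (illustrative expansion, not generated output):

/* read_sysreg(SCTLR) becomes: */
u32 __val;
asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (__val));

/* write_sysreg(v, SCTLR) becomes: */
asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" ((u32)(v)));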
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 871b8267d211..27d05813ff09 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -170,41 +170,11 @@ int main(void)
 	DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
 	BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
-	DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
-	DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr));
-	DEFINE(VCPU_CP15,		offsetof(struct kvm_vcpu, arch.cp15));
-	DEFINE(VCPU_VFP_GUEST,		offsetof(struct kvm_vcpu, arch.vfp_guest));
-	DEFINE(VCPU_VFP_HOST,		offsetof(struct kvm_vcpu, arch.host_cpu_context));
-	DEFINE(VCPU_REGS,		offsetof(struct kvm_vcpu, arch.regs));
-	DEFINE(VCPU_USR_REGS,		offsetof(struct kvm_vcpu, arch.regs.usr_regs));
-	DEFINE(VCPU_SVC_REGS,		offsetof(struct kvm_vcpu, arch.regs.svc_regs));
-	DEFINE(VCPU_ABT_REGS,		offsetof(struct kvm_vcpu, arch.regs.abt_regs));
-	DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs));
-	DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs));
-	DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
-	DEFINE(VCPU_PC,			offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
-	DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
-	DEFINE(VCPU_HCR,		offsetof(struct kvm_vcpu, arch.hcr));
-	DEFINE(VCPU_IRQ_LINES,		offsetof(struct kvm_vcpu, arch.irq_lines));
-	DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
-	DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
-	DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
-	DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-	DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
-	DEFINE(VGIC_V2_CPU_HCR,		offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-	DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-	DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-	DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-	DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-	DEFINE(VGIC_V2_CPU_APR,		offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-	DEFINE(VGIC_V2_CPU_LR,		offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-	DEFINE(VGIC_CPU_NR_LR,		offsetof(struct vgic_cpu, nr_lr));
-	DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-	DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-	DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
-	DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-	DEFINE(KVM_VGIC_VCTRL,		offsetof(struct kvm, arch.vgic.vctrl_base));
-	DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
+	DEFINE(VCPU_GUEST_CTXT,		offsetof(struct kvm_vcpu, arch.ctxt));
+	DEFINE(VCPU_HOST_CTXT,		offsetof(struct kvm_vcpu, arch.host_cpu_context));
+	DEFINE(CPU_CTXT_VFP,		offsetof(struct kvm_cpu_context, vfp));
+	DEFINE(CPU_CTXT_GP_REGS,	offsetof(struct kvm_cpu_context, gp_regs));
+	DEFINE(GP_REGS_USR,		offsetof(struct kvm_regs, usr_regs));
 #endif
 	BLANK();
 #ifdef CONFIG_VDSO
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde5ce48..b4139cbbbdd9 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -18,6 +18,11 @@
 	*(.proc.info.init)					\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define HYPERVISOR_TEXT						\
+	VMLINUX_SYMBOL(__hyp_text_start) = .;			\
+	*(.hyp.text)						\
+	VMLINUX_SYMBOL(__hyp_text_end) = .;
+
 #define IDMAP_TEXT						\
 	ALIGN_FUNCTION();					\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;			\
@@ -108,6 +113,7 @@ SECTIONS
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT
+			HYPERVISOR_TEXT
 			KPROBES_TEXT
 			*(.gnu.warning)
 			*(.glue_7)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index c5eef02c52ba..eb1bf4309c13 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,6 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 KVM := ../../../virt/kvm
 kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
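The DEFINE() entries are how the remaining assembly navigates the new layout: asm-offsets.c is compiled to assembler, the build extracts the constants into include/generated/asm-offsets.h, and the hyp code adds them up instead of hard-coding structure offsets. A sketch of the consuming side (the numeric value is invented for illustration):

/* include/generated/asm-offsets.h then contains lines such as:
 *	#define VCPU_GUEST_CTXT 32	// offsetof(struct kvm_vcpu, arch.ctxt)
 * so entry.S can form a pointer to vcpu->arch.ctxt.gp_regs.usr_regs with:
 *	add r0, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS + GP_REGS_USR)
 */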
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08e49c423c24..76552b51c7ae 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/kvm.h>
 #include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -265,6 +266,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
+	kvm_pmu_vcpu_destroy(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
@@ -320,6 +322,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
+	kvm_timer_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
@@ -577,6 +580,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_pmu_flush_hwstate(vcpu);
 		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
@@ -593,6 +597,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
 			vcpu->arch.power_off || vcpu->arch.pause) {
 			local_irq_enable();
+			kvm_pmu_sync_hwstate(vcpu);
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			preempt_enable();
@@ -642,10 +647,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
 		/*
-		 * We must sync the timer state before the vgic state so that
-		 * the vgic can properly sample the updated state of the
+		 * We must sync the PMU and timer state before the vgic state so
+		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
 		 */
+		kvm_pmu_sync_hwstate(vcpu);
 		kvm_timer_sync_hwstate(vcpu);
 
 		kvm_vgic_sync_hwstate(vcpu);
@@ -823,11 +829,54 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_device_attr attr;
 
 	switch (ioctl) {
 	case KVM_ARM_VCPU_INIT: {
@@ -870,6 +919,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			return -E2BIG;
 		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
 	}
+	case KVM_SET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_set_attr(vcpu, &attr);
+	}
+	case KVM_GET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_get_attr(vcpu, &attr);
+	}
+	case KVM_HAS_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_has_attr(vcpu, &attr);
+	}
 	default:
 		return -EINVAL;
 	}
@@ -967,6 +1031,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static void cpu_init_stage2(void *dummy)
+{
+	__cpu_init_stage2();
+}
+
 static void cpu_init_hyp_mode(void *dummy)
 {
 	phys_addr_t boot_pgd_ptr;
@@ -985,6 +1054,7 @@ static void cpu_init_hyp_mode(void *dummy)
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
 	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+	__cpu_init_stage2();
 
 	kvm_arm_init_debug();
 }
@@ -1035,6 +1105,82 @@ static inline void hyp_cpu_pm_init(void)
 }
 #endif
 
+static void teardown_common_resources(void)
+{
+	free_percpu(kvm_host_cpu_state);
+}
+
+static int init_common_resources(void)
+{
+	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+	if (!kvm_host_cpu_state) {
+		kvm_err("Cannot allocate host CPU state\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int init_subsystems(void)
+{
+	int err;
+
+	/*
+	 * Init HYP view of VGIC
+	 */
+	err = kvm_vgic_hyp_init();
+	switch (err) {
+	case 0:
+		vgic_present = true;
+		break;
+	case -ENODEV:
+	case -ENXIO:
+		vgic_present = false;
+		break;
+	default:
+		return err;
+	}
+
+	/*
+	 * Init HYP architected timer support
+	 */
+	err = kvm_timer_hyp_init();
+	if (err)
+		return err;
+
+	kvm_perf_init();
+	kvm_coproc_table_init();
+
+	return 0;
+}
+
+static void teardown_hyp_mode(void)
+{
+	int cpu;
+
+	if (is_kernel_in_hyp_mode())
+		return;
+
+	free_hyp_pgds();
+	for_each_possible_cpu(cpu)
+		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+}
+
+static int init_vhe_mode(void)
+{
+	/*
+	 * Execute the init code on each CPU.
+	 */
+	on_each_cpu(cpu_init_stage2, NULL, 1);
+
+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
+	kvm_info("VHE mode initialized successfully\n");
+	return 0;
+}
+
 /**
  * Inits Hyp-mode on all online CPUs
  */
@@ -1065,7 +1211,7 @@ static int init_hyp_mode(void)
 		stack_page = __get_free_page(GFP_KERNEL);
 		if (!stack_page) {
 			err = -ENOMEM;
-			goto out_free_stack_pages;
+			goto out_err;
 		}
 
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
@@ -1074,16 +1220,16 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
-	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+	err = create_hyp_mappings(__hyp_text_start, __hyp_text_end);
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	err = create_hyp_mappings(__start_rodata, __end_rodata);
 	if (err) {
 		kvm_err("Cannot map rodata section\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	/*
@@ -1095,20 +1241,10 @@ static int init_hyp_mode(void)
 
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
-			goto out_free_mappings;
+			goto out_err;
 		}
 	}
 
-	/*
-	 * Map the host CPU structures
-	 */
-	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
-	if (!kvm_host_cpu_state) {
-		err = -ENOMEM;
-		kvm_err("Cannot allocate host CPU state\n");
-		goto out_free_mappings;
-	}
-
 	for_each_possible_cpu(cpu) {
 		kvm_cpu_context_t *cpu_ctxt;
 
@@ -1117,7 +1253,7 @@ static int init_hyp_mode(void)
 
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
-			goto out_free_context;
+			goto out_err;
 		}
 	}
 
@@ -1126,34 +1262,22 @@ static int init_hyp_mode(void)
 	 */
 	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
 
-	/*
-	 * Init HYP view of VGIC
-	 */
-	err = kvm_vgic_hyp_init();
-	switch (err) {
-	case 0:
-		vgic_present = true;
-		break;
-	case -ENODEV:
-	case -ENXIO:
-		vgic_present = false;
-		break;
-	default:
-		goto out_free_context;
-	}
-
-	/*
-	 * Init HYP architected timer support
-	 */
-	err = kvm_timer_hyp_init();
-	if (err)
-		goto out_free_context;
-
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
 #endif
 
-	kvm_perf_init();
+	cpu_notifier_register_begin();
+
+	err = __register_cpu_notifier(&hyp_init_cpu_nb);
+
+	cpu_notifier_register_done();
+
+	if (err) {
+		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+		goto out_err;
+	}
+
+	hyp_cpu_pm_init();
 
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
@@ -1162,14 +1286,9 @@ static int init_hyp_mode(void)
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
 
-out_free_context:
-	free_percpu(kvm_host_cpu_state);
-out_free_mappings:
-	free_hyp_pgds();
-out_free_stack_pages:
-	for_each_possible_cpu(cpu)
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+out_err:
+	teardown_hyp_mode();
 	kvm_err("error initializing Hyp mode: %d\n", err);
 	return err;
 }
@@ -1213,26 +1332,27 @@ int kvm_arch_init(void *opaque)
 		}
 	}
 
-	cpu_notifier_register_begin();
-
-	err = init_hyp_mode();
+	err = init_common_resources();
 	if (err)
-		goto out_err;
+		return err;
 
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+	if (is_kernel_in_hyp_mode())
+		err = init_vhe_mode();
+	else
+		err = init_hyp_mode();
+	if (err)
 		goto out_err;
-	}
-
-	cpu_notifier_register_done();
-
-	hyp_cpu_pm_init();
 
+	err = init_subsystems();
+	if (err)
+		goto out_hyp;
 
-	kvm_coproc_table_init();
 	return 0;
+
+out_hyp:
+	teardown_hyp_mode();
 out_err:
-	cpu_notifier_register_done();
+	teardown_common_resources();
 	return err;
 }
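The three new ioctls expose the device-attribute API (previously reachable only on device fds) on a vcpu fd; on 32-bit ARM every group still answers -ENXIO through the kvm_arm_vcpu_arch_*_attr() stubs, but this is the plumbing the arm64 PMU support in this same pull hangs off. A minimal userspace probe (assumes an open vcpu fd; the group/attr values are placeholders):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_has_attr(int vcpu_fd, __u32 group, __u64 attr)
{
	struct kvm_device_attr da = { .group = group, .attr = attr };

	/* 0 if the attribute exists, -1 with errno == ENXIO otherwise */
	return ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &da);
}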
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index f3d88dc388bc..1bb2b79c01ff 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,6 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+
+#include <linux/bsearch.h>
 #include <linux/mm.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
@@ -54,8 +56,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
 				       const struct coproc_reg *r,
 				       u64 val)
 {
-	vcpu->arch.cp15[r->reg] = val & 0xffffffff;
-	vcpu->arch.cp15[r->reg + 1] = val >> 32;
+	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
+	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
 }
 
 static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
@@ -63,9 +65,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
 {
 	u64 val;
 
-	val = vcpu->arch.cp15[r->reg + 1];
+	val = vcpu_cp15(vcpu, r->reg + 1);
 	val = val << 32;
-	val = val | vcpu->arch.cp15[r->reg];
+	val = val | vcpu_cp15(vcpu, r->reg);
 	return val;
 }
 
@@ -104,7 +106,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
-	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
 }
@@ -117,7 +119,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 	return true;
 }
 
@@ -139,7 +141,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu,
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 	return true;
 }
 
@@ -156,7 +158,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
-	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
 }
 
 static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
@@ -171,7 +173,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	else
 		actlr &= ~(1U << 6);
 
-	vcpu->arch.cp15[c1_ACTLR] = actlr;
+	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
 }
 
 /*
@@ -218,9 +220,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
 
 	BUG_ON(!p->is_write);
 
-	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
+	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
-		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
+		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
 
 	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
@@ -381,17 +383,26 @@ static const struct coproc_reg cp15_regs[] = {
 	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
+static int check_reg_table(const struct coproc_reg *table, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 1; i < n; i++) {
+		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
+			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /* Target specific emulation tables */
 static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
-	unsigned int i;
-
-	for (i = 1; i < table->num; i++)
-		BUG_ON(cmp_reg(&table->table[i-1],
-			       &table->table[i]) >= 0);
-
+	BUG_ON(check_reg_table(table->table, table->num));
 	target_tables[table->target] = table;
 }
 
@@ -405,29 +416,32 @@ static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
 	return table->table;
 }
 
+#define reg_to_match_value(x)						\
+	({								\
+		unsigned long val;					\
+		val = (x)->CRn << 11;					\
+		val |= (x)->CRm << 7;					\
+		val |= (x)->Op1 << 4;					\
+		val |= (x)->Op2 << 1;					\
+		val |= !(x)->is_64bit;					\
+		val;							\
+	 })
+
+static int match_reg(const void *key, const void *elt)
+{
+	const unsigned long pval = (unsigned long)key;
+	const struct coproc_reg *r = elt;
+
+	return pval - reg_to_match_value(r);
+}
+
 static const struct coproc_reg *find_reg(const struct coproc_params *params,
 					 const struct coproc_reg table[],
 					 unsigned int num)
 {
-	unsigned int i;
-
-	for (i = 0; i < num; i++) {
-		const struct coproc_reg *r = &table[i];
-
-		if (params->is_64bit != r->is_64)
-			continue;
-		if (params->CRn != r->CRn)
-			continue;
-		if (params->CRm != r->CRm)
-			continue;
-		if (params->Op1 != r->Op1)
-			continue;
-		if (params->Op2 != r->Op2)
-			continue;
+	unsigned long pval = reg_to_match_value(params);
 
-		return r;
-	}
-	return NULL;
+	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 }
 
 static int emulate_cp15(struct kvm_vcpu *vcpu,
@@ -645,6 +659,9 @@ static struct coproc_reg invariant_cp15[] = {
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
 
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
@@ -660,9 +677,6 @@ static struct coproc_reg invariant_cp15[] = {
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
 /*
@@ -901,7 +915,7 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				   id);
 	}
 
@@ -911,13 +925,13 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
 	case KVM_REG_ARM_VFP_MVFR0:
 		val = fmrx(MVFR0);
 		return reg_to_user(uaddr, &val, id);
@@ -945,7 +959,7 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				     uaddr, id);
 	}
 
@@ -955,13 +969,13 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
 	/* These are invariant. */
 	case KVM_REG_ARM_VFP_MVFR0:
 		if (reg_from_user(&val, uaddr, id))
@@ -1030,7 +1044,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = vcpu_cp15_reg64_get(vcpu, r);
 		ret = reg_to_user(uaddr, &val, reg->id);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
 	}
 
 	return ret;
@@ -1060,7 +1074,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		if (!ret)
 			vcpu_cp15_reg64_set(vcpu, r, val);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
 	}
 
 	return ret;
@@ -1096,7 +1110,7 @@ static int write_demux_regids(u64 __user *uindices)
 static u64 cp15_to_index(const struct coproc_reg *reg)
 {
 	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
-	if (reg->is_64) {
+	if (reg->is_64bit) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
 		/*
@@ -1210,8 +1224,8 @@ void kvm_coproc_table_init(void)
 	unsigned int i;
 
 	/* Make sure tables are unique and in order. */
-	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
-		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
 
 	/* We abuse the reset function to overwrite the table itself. */
 	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
@@ -1248,7 +1262,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 	const struct coproc_reg *table;
 
 	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
 
 	/* Generic chip reset first (so target could override). */
 	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
@@ -1257,6 +1271,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 	reset_coproc_regs(vcpu, table, num);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu->arch.cp15[num] == 0x42424242)
-			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+		if (vcpu_cp15(vcpu, num) == 0x42424242)
+			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
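The bsearch() conversion works only because reg_to_match_value() induces exactly the order cmp_reg() sorts the tables by: CRn, then CRm, Op1, Op2, with the 64-bit variant ahead of its 32-bit siblings (the key's low bit is !is_64bit). Working the packing by hand for two entries (illustrative arithmetic only):

/* 32-bit SCTLR {CRn(1), CRm(0), Op1(0), Op2(0), is32}: */
unsigned long sctlr_key = (1 << 11) | (0 << 7) | (0 << 4) | (0 << 1) | 1; /* 0x801  */
/* 64-bit TTBR0 {CRm64(2), Op1(0), is64} (CRm64 sets CRn = 2, CRm = 0):  */
unsigned long ttbr0_key = (2 << 11) | (0 << 7) | (0 << 4) | (0 << 1) | 0; /* 0x1000 */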
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 88d24a3a9778..eef1759c2b65 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -37,7 +37,7 @@ struct coproc_reg {
 	unsigned long Op1;
 	unsigned long Op2;
 
-	bool is_64;
+	bool is_64bit;
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
@@ -47,7 +47,7 @@ struct coproc_reg {
 	/* Initialization for vcpu. */
 	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
 
-	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+	/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
 	unsigned long reg;
 
 	/* Value (usually reset value) */
@@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu,
 				 const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
 }
 
 static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = r->val;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = r->val;
 }
 
 static inline void reset_unknown64(struct kvm_vcpu *vcpu,
 				   const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
 
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
-	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
+	vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
 }
 
 static inline int cmp_reg(const struct coproc_reg *i1,
@@ -141,7 +141,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return i1->Op1 - i2->Op1;
 	if (i1->Op2 != i2->Op2)
 		return i1->Op2 - i2->Op2;
-	return i2->is_64 - i1->is_64;
+	return i2->is_64bit - i1->is_64bit;
 }
 
 
@@ -150,8 +150,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define CRm64(_x)       .CRn = _x, .CRm = 0
 #define Op1(_x) 	.Op1 = _x
 #define Op2(_x) 	.Op2 = _x
-#define is64		.is_64 = true
-#define is32		.is_64 = false
+#define is64		.is_64bit = true
+#define is32		.is_64bit = false
 
 bool access_vm_reg(struct kvm_vcpu *vcpu,
 		   const struct coproc_params *p,
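Put together, these designated-initializer macros let a table entry read almost like the architecture manual. For reference, the cp15_regs[] entry for SCTLR looks roughly like this (quoted from memory, so treat the reset value as illustrative):

/* SCTLR: c1, CRm 0, Op1 0, Op2 0; trapped so cache enable/disable is seen,
 * reset to a fixed value, shadowed in vcpu_cp15(vcpu, c1_SCTLR). */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
	access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },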
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index dc99159857b4..a494def3f195 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -112,7 +112,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  */
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 
 	switch (mode) {
@@ -147,15 +147,15 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
-		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
 	case ABT_MODE:
-		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
 	case UND_MODE:
-		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
 	case IRQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
 	case FIQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
 	default:
 		BUG();
 	}
@@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 
 static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 {
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
-	u32 vbar = vcpu->arch.cp15[c12_VBAR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
 
 	if (sctlr & SCTLR_V)
 		return 0xffff0000;
@@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
 	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
 
@@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 
 	if (is_pabt) {
 		/* Set IFAR and IFSR */
-		vcpu->arch.cp15[c6_IFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_IFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_IFSR] = 2;
+			vcpu_cp15(vcpu, c5_IFSR) = 2;
 	} else { /* !iabt */
 		/* Set DFAR and DFSR */
-		vcpu->arch.cp15[c6_DFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_DFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_DFSR] = 2;
+			vcpu_cp15(vcpu, c5_DFSR) = 2;
 	}
 }
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 99361f11354a..9093ed0f8b2a 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -25,7 +25,6 @@
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
@@ -55,7 +54,7 @@ static u64 core_reg_offset_from_id(u64 id)
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
@@ -72,7 +71,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off, val;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3ede90d8b20b..3f1ef0dbc899 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -147,13 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
-			kvm_vcpu_get_hyp_pc(vcpu));
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
 	case ARM_EXCEPTION_HVC:
 		/*
 		 * See ARM ARM B1.14.1: "Hyp traps on instructions
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
new file mode 100644
index 000000000000..8dfa5f7f9290
--- /dev/null
+++ b/arch/arm/kvm/hyp/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
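The guest.c side of this is the stable KVM_GET_ONE_REG/KVM_SET_ONE_REG ABI, which is precisely why the internal move from arch.regs to arch.ctxt.gp_regs is invisible to userspace. A userspace sketch reading the guest PC through that interface (assumes an open vcpu fd):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int read_guest_pc(int vcpu_fd, __u32 *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(usr_regs.ARM_pc),
		.addr = (__u64)(unsigned long)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}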
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
new file mode 100644
index 000000000000..111bda8cdebd
--- /dev/null
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -0,0 +1,77 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension     virt");
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->gp_regs.usr_regs.ARM_sp	= read_special(SP_usr);
+	ctxt->gp_regs.usr_regs.ARM_pc	= read_special(ELR_hyp);
+	ctxt->gp_regs.usr_regs.ARM_cpsr	= read_special(SPSR);
+	ctxt->gp_regs.KVM_ARM_SVC_sp	= read_special(SP_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_lr	= read_special(LR_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_spsr	= read_special(SPSR_svc);
+	ctxt->gp_regs.KVM_ARM_ABT_sp	= read_special(SP_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_lr	= read_special(LR_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_spsr	= read_special(SPSR_abt);
+	ctxt->gp_regs.KVM_ARM_UND_sp	= read_special(SP_und);
+	ctxt->gp_regs.KVM_ARM_UND_lr	= read_special(LR_und);
+	ctxt->gp_regs.KVM_ARM_UND_spsr	= read_special(SPSR_und);
+	ctxt->gp_regs.KVM_ARM_IRQ_sp	= read_special(SP_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_lr	= read_special(LR_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_spsr	= read_special(SPSR_irq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r8	= read_special(R8_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r9	= read_special(R9_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r10	= read_special(R10_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_fp	= read_special(R11_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_ip	= read_special(R12_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_sp	= read_special(SP_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_lr	= read_special(LR_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_spsr	= read_special(SPSR_fiq);
+}
+
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_special(ctxt->gp_regs.usr_regs.ARM_sp,	SP_usr);
+	write_special(ctxt->gp_regs.usr_regs.ARM_pc,	ELR_hyp);
+	write_special(ctxt->gp_regs.usr_regs.ARM_cpsr,	SPSR_cxsf);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_sp,	SP_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_lr,	LR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr,	SPSR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_sp,	SP_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_lr,	LR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr,	SPSR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_sp,	SP_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_lr,	LR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_spsr,	SPSR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp,	SP_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr,	LR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr,	SPSR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8,	R8_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9,	R9_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10,	R10_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp,	R11_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip,	R12_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp,	SP_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr,	LR_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr,	SPSR_fiq);
+}
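read_special()/write_special() are thin wrappers around the banked-register forms of mrs/msr that the virtualization extensions add, which is why the file opens with ".arch_extension virt". Expanded by hand (illustrative only):

/* read_special(SP_usr) becomes: */
u32 __val;
asm volatile("mrs %0, SP_usr" : "=r" (__val));

/* write_special(v, SPSR_cxsf) becomes: */
asm volatile("msr SPSR_cxsf, %0" : : "r" (v));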
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
new file mode 100644
index 000000000000..c4782812714c
--- /dev/null
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
+{
+	return (u64 *)(ctxt->cp15 + idx);
+}
+
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->cp15[c0_MPIDR]		= read_sysreg(VMPIDR);
+	ctxt->cp15[c0_CSSELR]		= read_sysreg(CSSELR);
+	ctxt->cp15[c1_SCTLR]		= read_sysreg(SCTLR);
+	ctxt->cp15[c1_CPACR]		= read_sysreg(CPACR);
+	*cp15_64(ctxt, c2_TTBR0)	= read_sysreg(TTBR0);
+	*cp15_64(ctxt, c2_TTBR1)	= read_sysreg(TTBR1);
+	ctxt->cp15[c2_TTBCR]		= read_sysreg(TTBCR);
+	ctxt->cp15[c3_DACR]		= read_sysreg(DACR);
+	ctxt->cp15[c5_DFSR]		= read_sysreg(DFSR);
+	ctxt->cp15[c5_IFSR]		= read_sysreg(IFSR);
+	ctxt->cp15[c5_ADFSR]		= read_sysreg(ADFSR);
+	ctxt->cp15[c5_AIFSR]		= read_sysreg(AIFSR);
+	ctxt->cp15[c6_DFAR]		= read_sysreg(DFAR);
+	ctxt->cp15[c6_IFAR]		= read_sysreg(IFAR);
+	*cp15_64(ctxt, c7_PAR)		= read_sysreg(PAR);
+	ctxt->cp15[c10_PRRR]		= read_sysreg(PRRR);
+	ctxt->cp15[c10_NMRR]		= read_sysreg(NMRR);
+	ctxt->cp15[c10_AMAIR0]		= read_sysreg(AMAIR0);
+	ctxt->cp15[c10_AMAIR1]		= read_sysreg(AMAIR1);
+	ctxt->cp15[c12_VBAR]		= read_sysreg(VBAR);
+	ctxt->cp15[c13_CID]		= read_sysreg(CID);
+	ctxt->cp15[c13_TID_URW]		= read_sysreg(TID_URW);
+	ctxt->cp15[c13_TID_URO]		= read_sysreg(TID_URO);
+	ctxt->cp15[c13_TID_PRIV]	= read_sysreg(TID_PRIV);
+	ctxt->cp15[c14_CNTKCTL]		= read_sysreg(CNTKCTL);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg(ctxt->cp15[c0_MPIDR],	VMPIDR);
+	write_sysreg(ctxt->cp15[c0_CSSELR],	CSSELR);
+	write_sysreg(ctxt->cp15[c1_SCTLR],	SCTLR);
+	write_sysreg(ctxt->cp15[c1_CPACR],	CPACR);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR0),	TTBR0);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR1),	TTBR1);
+	write_sysreg(ctxt->cp15[c2_TTBCR],	TTBCR);
+	write_sysreg(ctxt->cp15[c3_DACR],	DACR);
+	write_sysreg(ctxt->cp15[c5_DFSR],	DFSR);
+	write_sysreg(ctxt->cp15[c5_IFSR],	IFSR);
+	write_sysreg(ctxt->cp15[c5_ADFSR],	ADFSR);
+	write_sysreg(ctxt->cp15[c5_AIFSR],	AIFSR);
+	write_sysreg(ctxt->cp15[c6_DFAR],	DFAR);
+	write_sysreg(ctxt->cp15[c6_IFAR],	IFAR);
+	write_sysreg(*cp15_64(ctxt, c7_PAR),	PAR);
+	write_sysreg(ctxt->cp15[c10_PRRR],	PRRR);
+	write_sysreg(ctxt->cp15[c10_NMRR],	NMRR);
+	write_sysreg(ctxt->cp15[c10_AMAIR0],	AMAIR0);
+	write_sysreg(ctxt->cp15[c10_AMAIR1],	AMAIR1);
+	write_sysreg(ctxt->cp15[c12_VBAR],	VBAR);
+	write_sysreg(ctxt->cp15[c13_CID],	CID);
+	write_sysreg(ctxt->cp15[c13_TID_URW],	TID_URW);
+	write_sysreg(ctxt->cp15[c13_TID_URO],	TID_URO);
+	write_sysreg(ctxt->cp15[c13_TID_PRIV],	TID_PRIV);
+	write_sysreg(ctxt->cp15[c14_CNTKCTL],	CNTKCTL);
+}
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
new file mode 100644
index 000000000000..21c238871c9e
--- /dev/null
+++ b/arch/arm/kvm/hyp/entry.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
+
+	.arch_extension     virt
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)
+
+/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
+ENTRY(__guest_enter)
+	@ Save host registers
+	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
+	stm	r1!, {r4-r12}
+	str	lr, [r1, #4]	@ Skip SP_usr (already saved)
+
+	@ Restore guest registers
+	add	r0, r0,  #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	ldr	lr, [r0, #S_LR]
+	ldm	r0, {r0-r12}
+
+	clrex
+	eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+	/*
+	 * return convention:
+	 * guest r0, r1, r2 saved on the stack
+	 * r0: vcpu pointer
+	 * r1: exception code
+	 */
+
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
+	stm	r2!, {r3-r12}
+	str	lr, [r2, #4]
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	pop	{r3, r4, r5}		@ r0, r1, r2
+	stm	r2, {r3-r5}
+
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
+	ldm	r0!, {r4-r12}
+	ldr	lr, [r0, #4]
+
+	mov	r0, r1
+	bx	lr
+ENDPROC(__guest_exit)
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however cp10 and cp11 accesses will still trap and fallback
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+ENTRY(__vfp_guest_restore)
+	push	{r3, r4, lr}
+
+	@ NEON/VFP used.  Turn on VFP access.
+	mrc	p15, 4, r1, c1, c1, 2		@ HCPTR
+	bic	r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	mcr	p15, 4, r1, c1, c1, 2		@ HCPTR
+	isb
+
+	@ Switch VFP/NEON hardware state to the guest's
+	mov	r4, r0
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #CPU_CTXT_VFP
+	bl	__vfp_save_state
+	add	r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
+	bl	__vfp_restore_state
+
+	pop	{r3, r4, lr}
+	pop	{r0, r1, r2}
+	clrex
+	eret
+ENDPROC(__vfp_guest_restore)
+#endif
+
+	.popsection
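cp15_64() depends on the enum layout: it reinterprets two adjacent 32-bit slots as one little-endian u64, so each _high index must sit directly after its low word. A compile-time guard in the same spirit (hypothetical, not in the patch):

static inline void check_cp15_64_layout(void)
{
	BUILD_BUG_ON(c2_TTBR0_high != c2_TTBR0 + 1);
	BUILD_BUG_ON(c2_TTBR1_high != c2_TTBR1 + 1);
	BUILD_BUG_ON(c7_PAR_high != c7_PAR + 1);
}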
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/linkage.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_asm.h> + + .arch_extension virt + + .text + .pushsection .hyp.text, "ax" + +.macro load_vcpu reg + mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR +.endm + +/******************************************************************** + * Hypervisor exception vector and handlers + * + * + * The KVM/ARM Hypervisor ABI is defined as follows: + * + * Entry to Hyp mode from the host kernel will happen _only_ when an HVC + * instruction is issued since all traps are disabled when running the host + * kernel as per the Hyp-mode initialization at boot time. + * + * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc + * below) when the HVC instruction is called from SVC mode (i.e. a guest or the + * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC + * instructions are called from within Hyp-mode. + * + * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): + * Switching to Hyp mode is done through a simple HVC #0 instruction. The + * exception vector code will check that the HVC comes from VMID==0. + * - r0 contains a pointer to a HYP function + * - r1, r2, and r3 contain arguments to the above function. + * - The HYP function will be called with its arguments in r0, r1 and r2. + * On HYP function return, we return directly to SVC. + * + * Note that the above is used to execute code in Hyp-mode from a host-kernel + * point of view, and is a different concept from performing a world-switch and + * executing guest code SVC mode (with a VMID != 0). + */ + + .align 5 +__kvm_hyp_vector: + .global __kvm_hyp_vector + + @ Hyp-mode exception vector + W(b) hyp_reset + W(b) hyp_undef + W(b) hyp_svc + W(b) hyp_pabt + W(b) hyp_dabt + W(b) hyp_hvc + W(b) hyp_irq + W(b) hyp_fiq + +.macro invalid_vector label, cause + .align +\label: mov r0, #\cause + b __hyp_panic +.endm + + invalid_vector hyp_reset ARM_EXCEPTION_RESET + invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED + invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE + invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT + invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT + invalid_vector hyp_fiq ARM_EXCEPTION_FIQ + +ENTRY(__hyp_do_panic) + mrs lr, cpsr + bic lr, lr, #MODE_MASK + orr lr, lr, #SVC_MODE +THUMB( orr lr, lr, #PSR_T_BIT ) + msr spsr_cxsf, lr + ldr lr, =panic + msr ELR_hyp, lr + ldr lr, =kvm_call_hyp + clrex + eret +ENDPROC(__hyp_do_panic) + +hyp_hvc: + /* + * Getting here is either because of a trap from a guest, + * or from executing HVC from the host kernel, which means + * "do something in Hyp mode". + */ + push {r0, r1, r2} + + @ Check syndrome register + mrc p15, 4, r1, c5, c2, 0 @ HSR + lsr r0, r1, #HSR_EC_SHIFT + cmp r0, #HSR_EC_HVC + bne guest_trap @ Not HVC instr. + + /* + * Let's check if the HVC came from VMID 0 and allow simple + * switch to Hyp mode + */ + mrrc p15, 6, r0, r2, c2 + lsr r2, r2, #16 + and r2, r2, #0xff + cmp r2, #0 + bne guest_trap @ Guest called HVC + + /* + * Getting here means host called HVC, we shift parameters and branch + * to Hyp function. 
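[The load_vcpu macro above works because the world switch stashes the vcpu pointer in HTPIDR (the Hyp software thread ID register) before entering the guest: switch.c below does write_sysreg(vcpu, HTPIDR), and __hyp_panic() reads it back the same way. A minimal C view of the same convention — the helper name here is hypothetical:

	/* Recover the current vcpu from HTPIDR, as load_vcpu does in asm. */
	static struct kvm_vcpu *__hyp_text get_host_vcpu(void)
	{
		return (struct kvm_vcpu *)read_sysreg(HTPIDR);
	}
]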
+ */ + pop {r0, r1, r2} + + /* Check for __hyp_get_vectors */ + cmp r0, #-1 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR + beq 1f + + push {lr} + + mov lr, r0 + mov r0, r1 + mov r1, r2 + mov r2, r3 + +THUMB( orr lr, #1) + blx lr @ Call the HYP function + + pop {lr} +1: eret + +guest_trap: + load_vcpu r0 @ Load VCPU pointer to r0 + +#ifdef CONFIG_VFPv3 + @ Check for a VFP access + lsr r1, r1, #HSR_EC_SHIFT + cmp r1, #HSR_EC_CP_0_13 + beq __vfp_guest_restore +#endif + + mov r1, #ARM_EXCEPTION_HVC + b __guest_exit + +hyp_irq: + push {r0, r1, r2} + mov r1, #ARM_EXCEPTION_IRQ + load_vcpu r0 @ Load VCPU pointer to r0 + b __guest_exit + + .ltorg + + .popsection diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..7be39af2ed6c --- /dev/null +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/types.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_hyp.h> + +void __hyp_text __init_stage2_translation(void) +{ + u64 val; + + val = read_sysreg(VTCR) & ~VTCR_MASK; + + val |= read_sysreg(HTCR) & VTCR_HTCR_SH; + val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S; + + write_sysreg(val, VTCR); +} diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c new file mode 100644 index 000000000000..b13caa90cd44 --- /dev/null +++ b/arch/arm/kvm/hyp/switch.c @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <asm/kvm_asm.h> +#include <asm/kvm_hyp.h> + +__asm__(".arch_extension virt"); + +/* + * Activate the traps, saving the host's fpexc register before + * overwriting it. We'll restore it on VM exit. + */ +static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) +{ + u32 val; + + /* + * We are about to set HCPTR.TCP10/11 to trap all floating point + * register accesses to HYP, however, the ARM ARM clearly states that + * traps are only taken to HYP if the operation would not otherwise + * trap to SVC. Therefore, always make sure that for 32-bit guests, + * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits. 
+ */ + val = read_sysreg(VFP_FPEXC); + *fpexc_host = val; + if (!(val & FPEXC_EN)) { + write_sysreg(val | FPEXC_EN, VFP_FPEXC); + isb(); + } + + write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ + write_sysreg(HSTR_T(15), HSTR); + write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); + val = read_sysreg(HDCR); + write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); +} + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + u32 val; + + write_sysreg(0, HCR); + write_sysreg(0, HSTR); + val = read_sysreg(HDCR); + write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR); + write_sysreg(0, HCPTR); +} + +static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + write_sysreg(kvm->arch.vttbr, VTTBR); + write_sysreg(vcpu->arch.midr, VPIDR); +} + +static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) +{ + write_sysreg(0, VTTBR); + write_sysreg(read_sysreg(MIDR), VPIDR); +} + +static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_save_state(vcpu); +} + +static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_restore_state(vcpu); +} + +static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u32 hsr = read_sysreg(HSR); + u8 ec = hsr >> HSR_EC_SHIFT; + u32 hpfar, far; + + vcpu->arch.fault.hsr = hsr; + + if (ec == HSR_EC_IABT) + far = read_sysreg(HIFAR); + else if (ec == HSR_EC_DABT) + far = read_sysreg(HDFAR); + else + return true; + + /* + * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: + * + * Abort on the stage 2 translation for a memory access from a + * Non-secure PL1 or PL0 mode: + * + * For any Access flag fault or Translation fault, and also for any + * Permission fault on the stage 2 translation of a memory access + * made as part of a translation table walk for a stage 1 translation, + * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR + * is UNKNOWN. + */ + if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) { + u64 par, tmp; + + par = read_sysreg(PAR); + write_sysreg(far, ATS1CPR); + isb(); + + tmp = read_sysreg(PAR); + write_sysreg(par, PAR); + + if (unlikely(tmp & 1)) + return false; /* Translation failed, back to guest */ + + hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4; + } else { + hpfar = read_sysreg(HPFAR); + } + + vcpu->arch.fault.hxfar = far; + vcpu->arch.fault.hpfar = hpfar; + return true; +} + +static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_cpu_context *guest_ctxt; + bool fp_enabled; + u64 exit_code; + u32 fpexc; + + vcpu = kern_hyp_va(vcpu); + write_sysreg(vcpu, HTPIDR); + + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + guest_ctxt = &vcpu->arch.ctxt; + + __sysreg_save_state(host_ctxt); + __banked_save_state(host_ctxt); + + __activate_traps(vcpu, &fpexc); + __activate_vm(vcpu); + + __vgic_restore_state(vcpu); + __timer_restore_state(vcpu); + + __sysreg_restore_state(guest_ctxt); + __banked_restore_state(guest_ctxt); + + /* Jump in the fire! */ +again: + exit_code = __guest_enter(vcpu, host_ctxt); + /* And we're baaack! 
*/ + + if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu)) + goto again; + + fp_enabled = __vfp_enabled(); + + __banked_save_state(guest_ctxt); + __sysreg_save_state(guest_ctxt); + __timer_save_state(vcpu); + __vgic_save_state(vcpu); + + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + + __banked_restore_state(host_ctxt); + __sysreg_restore_state(host_ctxt); + + if (fp_enabled) { + __vfp_save_state(&guest_ctxt->vfp); + __vfp_restore_state(&host_ctxt->vfp); + } + + write_sysreg(fpexc, VFP_FPEXC); + + return exit_code; +} + +__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +static const char * const __hyp_panic_string[] = { + [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", + [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x", + [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x", + [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x", + [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x", + [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x", + [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x", + [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x", +}; + +void __hyp_text __noreturn __hyp_panic(int cause) +{ + u32 elr = read_special(ELR_hyp); + u32 val; + + if (cause == ARM_EXCEPTION_DATA_ABORT) + val = read_sysreg(HDFAR); + else + val = read_special(SPSR); + + if (read_sysreg(VTTBR)) { + struct kvm_vcpu *vcpu; + struct kvm_cpu_context *host_ctxt; + + vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + __sysreg_restore_state(host_ctxt); + } + + /* Call panic for real */ + __hyp_do_panic(__hyp_panic_string[cause], elr, val); + + unreachable(); +} diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c new file mode 100644 index 000000000000..a2636001e616 --- /dev/null +++ b/arch/arm/kvm/hyp/tlb.c @@ -0,0 +1,70 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <asm/kvm_hyp.h> + +/** + * Flush per-VMID TLBs + * + * __kvm_tlb_flush_vmid(struct kvm *kvm); + * + * We rely on the hardware to broadcast the TLB invalidation to all CPUs + * inside the inner-shareable domain (which is the case for all v7 + * implementations). If we come across a non-IS SMP implementation, we'll + * have to use an IPI based mechanism. Until then, we stick to the simple + * hardware assisted version. + * + * As v7 does not support flushing per IPA, just nuke the whole TLB + * instead, ignoring the ipa value. 
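[The __alias() annotation on __kvm_vcpu_run above (and on the TLB entry points just below) lets a hyp-internal static function double as the externally visible symbol that kvm_call_hyp() dispatches to. This is presumably the kernel's standard GCC alias wrapper, roughly:

	/* Sketch of the __alias helper; the real definition lives in the
	 * compiler headers (compiler-gcc.h). */
	#define __alias(symbol) __attribute__((alias(#symbol)))

The split matters: the static implementations carry __hyp_text and so land in the .hyp.text section, while the aliases provide the stable names the rest of KVM calls.]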
+ */
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+	dsb(ishst);
+
+	/* Switch to requested VMID */
+	kvm = kern_hyp_va(kvm);
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALLIS);
+	dsb(ish);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+	__tlb_flush_vmid(kvm);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+							    phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+	write_sysreg(0, TLBIALLNSNHIS);
+	write_sysreg(0, ICIALLUIS);
+	dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S
new file mode 100644
index 000000000000..7c297e87eb8b
--- /dev/null
+++ b/arch/arm/kvm/hyp/vfp.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/vfpmacros.h>
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
+ENTRY(__vfp_save_state)
+	push	{r4, r5}
+	VFPFMRX	r1, FPEXC
+
+	@ Make sure VFP is *really* enabled so we can touch the registers.
+	orr	r5, r1, #FPEXC_EN
+	tst	r5, #FPEXC_EX		@ Check for VFP Subarchitecture
+	bic	r5, r5, #FPEXC_EX	@ FPEXC_EX disable
+	VFPFMXR	FPEXC, r5
+	isb
+
+	VFPFMRX	r2, FPSCR
+	beq	1f
+
+	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+	@ we only need to save them if FPEXC_EX is set.
+	VFPFMRX	r3, FPINST
+	tst	r5, #FPEXC_FP2V
+	VFPFMRX	r4, FPINST2, ne		@ vmrsne
+1:
+	VFPFSTMIA	r0, r5		@ Save VFP registers
+	stm	r0, {r1-r4}		@ Save FPEXC, FPSCR, FPINST, FPINST2
+	pop	{r4, r5}
+	bx	lr
+ENDPROC(__vfp_save_state)
+
+/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
+ * Assume FPEXC_EN is on and FPEXC_EX is off */
+ENTRY(__vfp_restore_state)
+	VFPFLDMIA	r0, r1		@ Load VFP registers
+	ldm	r0, {r0-r3}		@ Load FPEXC, FPSCR, FPINST, FPINST2
+
+	VFPFMXR	FPSCR, r1
+	tst	r0, #FPEXC_EX		@ Check for VFP Subarchitecture
+	beq	1f
+	VFPFMXR	FPINST, r2
+	tst	r0, #FPEXC_FP2V
+	VFPFMXR	FPINST2, r3, ne
1:
+	VFPFMXR	FPEXC, r0		@ FPEXC	(last, in case !EN)
+	bx	lr
+ENDPROC(__vfp_restore_state)
+
+	.popsection
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72d16ff..1f9ae17476f9 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -84,14 +84,6 @@ __do_hyp_init:
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c2, c0, 2	@ HTCR
 
-	mrc	p15, 4, r1, c2, c1, 2	@ VTCR
-	ldr	r2, =VTCR_MASK
-	bic	r1, r1, r2
-	bic	r0, r0, #(~VTCR_HTCR_SH)	@ clear non-reusable HTCR bits
-	orr	r1, r0, r1
-	orr	r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
-	mcr	p15, 4, r1, c2, c1, 2	@ VTCR
-
 	@ Use the same memory attributes for hyp. accesses as the kernel
 	@ (copy MAIRx to HMAIRx).
mrc p15, 0, r0, c10, c2, 0 diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 900ef6dd8f72..b1bd316f14c0 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -17,211 +17,14 @@ */ #include <linux/linkage.h> -#include <linux/const.h> -#include <asm/unified.h> -#include <asm/page.h> -#include <asm/ptrace.h> -#include <asm/asm-offsets.h> -#include <asm/kvm_asm.h> -#include <asm/kvm_arm.h> -#include <asm/vfpmacros.h> -#include "interrupts_head.S" .text -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start - -/******************************************************************** - * Flush per-VMID TLBs - * - * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. - * - * As v7 does not support flushing per IPA, just nuke the whole TLB - * instead, ignoring the ipa value. - */ -ENTRY(__kvm_tlb_flush_vmid_ipa) - push {r2, r3} - - dsb ishst - add r0, r0, #KVM_VTTBR - ldrd r2, r3, [r0] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - isb - mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) - dsb ish - isb - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 - isb @ Not necessary if followed by eret - - pop {r2, r3} - bx lr -ENDPROC(__kvm_tlb_flush_vmid_ipa) - -/** - * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs - * - * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address - * parameter - */ - -ENTRY(__kvm_tlb_flush_vmid) - b __kvm_tlb_flush_vmid_ipa -ENDPROC(__kvm_tlb_flush_vmid) - -/******************************************************************** - * Flush TLBs and instruction caches of all CPUs inside the inner-shareable - * domain, for all VMIDs - * - * void __kvm_flush_vm_context(void); - */ -ENTRY(__kvm_flush_vm_context) - mov r0, #0 @ rn parameter for c15 flushes is SBZ - - /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ - mcr p15, 4, r0, c8, c3, 4 - /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ - mcr p15, 0, r0, c7, c1, 0 - dsb ish - isb @ Not necessary if followed by eret - - bx lr -ENDPROC(__kvm_flush_vm_context) - - -/******************************************************************** - * Hypervisor world-switch code - * - * - * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) - */ -ENTRY(__kvm_vcpu_run) - @ Save the vcpu pointer - mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR - - save_host_regs - - restore_vgic_state - restore_timer_state - - @ Store hardware CP15 state and load guest state - read_cp15_state store_to_vcpu = 0 - write_cp15_state read_from_vcpu = 1 - - @ If the host kernel has not been configured with VFPv3 support, - @ then it is safer if we deny guests from using it as well. 
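[Both vfp.S entry points above take a struct vfp_hard_struct pointer in r0 and depend on its layout: the double registers first (VFPFSTMIA/VFPFLDMIA), then FPEXC, FPSCR, FPINST and FPINST2 as one stm/ldm block. A hedged sketch of the assumed layout — the real definition lives in asm/fpstate.h and sizes fpregs according to the VFP configuration:

	struct vfp_hard_struct {
		__u64 fpregs[32];	/* d0-d31; 16 on non-D32 implementations */
		__u32 fpexc;		/* saved/restored by the stm/ldm above */
		__u32 fpscr;
		__u32 fpinst;		/* only meaningful when FPEXC_EX is set */
		__u32 fpinst2;		/* only meaningful when FPEXC_FP2V is set */
	};
]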
-#ifdef CONFIG_VFPv3 - @ Set FPEXC_EN so the guest doesn't trap floating point instructions - VFPFMRX r2, FPEXC @ VMRS - push {r2} - orr r2, r2, #FPEXC_EN - VFPFMXR FPEXC, r2 @ VMSR -#endif - - @ Configure Hyp-role - configure_hyp_role vmentry - - @ Trap coprocessor CRx accesses - set_hstr vmentry - set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - set_hdcr vmentry - - @ Write configured ID register into MIDR alias - ldr r1, [vcpu, #VCPU_MIDR] - mcr p15, 4, r1, c0, c0, 0 - - @ Write guest view of MPIDR into VMPIDR - ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] - mcr p15, 4, r1, c0, c0, 5 - - @ Set up guest memory translation - ldr r1, [vcpu, #VCPU_KVM] - add r1, r1, #KVM_VTTBR - ldrd r2, r3, [r1] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - - @ We're all done, just restore the GPRs and go to the guest - restore_guest_regs - clrex @ Clear exclusive monitor - eret - -__kvm_vcpu_return: - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - save_guest_regs - - @ Set VMID == 0 - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ Don't trap coprocessor accesses for host kernel - set_hstr vmexit - set_hdcr vmexit - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore - -#ifdef CONFIG_VFPv3 - @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST - store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST - ldr r7, [r7] - restore_vfp_state r7 - -after_vfp_restore: - @ Restore FPEXC_EN which we clobbered on entry - pop {r2} - VFPFMXR FPEXC, r2 -#else -after_vfp_restore: -#endif - - @ Reset Hyp-role - configure_hyp_role vmexit - - @ Let host read hardware MIDR - mrc p15, 0, r2, c0, c0, 0 - mcr p15, 4, r2, c0, c0, 0 - - @ Back to hardware MPIDR - mrc p15, 0, r2, c0, c0, 5 - mcr p15, 4, r2, c0, c0, 5 - - @ Store guest CP15 state and restore host state - read_cp15_state store_to_vcpu = 1 - write_cp15_state read_from_vcpu = 0 - - save_timer_state - save_vgic_state - - restore_host_regs - clrex @ Clear exclusive monitor -#ifndef CONFIG_CPU_ENDIAN_BE8 - mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value -#else - @ r1 already has return code - mov r0, #0 @ Clear upper bits in return value -#endif /* CONFIG_CPU_ENDIAN_BE8 */ - bx lr @ return to IOCTL - /******************************************************************** * Call function in Hyp mode * * - * u64 kvm_call_hyp(void *hypfn, ...); + * unsigned long kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers @@ -232,7 +35,7 @@ after_vfp_restore: * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in r0 (strictly 32bit). 
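[In practice the host never issues HVC #0 by hand; it goes through kvm_call_hyp() with the target HYP function as the first argument. Hedged examples of typical call sites, paraphrased from the KVM host code rather than quoted from this patch:

	/* Flush the guest's TLB entries for one IPA, from the host. */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);

	/* Enter the guest; the (strictly 32-bit) exit code comes back in r0. */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
]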
* * A function pointer with a value of 0xffffffff has a special meaning, * and is used to implement __hyp_get_vectors in the same way as in @@ -246,281 +49,4 @@ after_vfp_restore: ENTRY(kvm_call_hyp) hvc #0 bx lr - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0 and if - * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. - * On HYP function return, we return directly to SVC. - * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). - */ - -/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ -.macro bad_exception exception_code, panic_str - push {r0-r2} - mrrc p15, 6, r0, r1, c2 @ Read VTTBR - lsr r1, r1, #16 - ands r1, r1, #0xff - beq 99f - - load_vcpu @ Load VCPU pointer - .if \exception_code == ARM_EXCEPTION_DATA_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 0 @ HDFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - .if \exception_code == ARM_EXCEPTION_PREF_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 2 @ HIFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - mov r1, #\exception_code - b __kvm_vcpu_return - - @ We were in the host already. Let's craft a panic-ing return to SVC. -99: mrs r2, cpsr - bic r2, r2, #MODE_MASK - orr r2, r2, #SVC_MODE -THUMB( orr r2, r2, #PSR_T_BIT ) - msr spsr_cxsf, r2 - mrs r1, ELR_hyp - ldr r2, =panic - msr ELR_hyp, r2 - ldr r0, =\panic_str - clrex @ Clear exclusive monitor - eret -.endm - - .text - - .align 5 -__kvm_hyp_vector: - .globl __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - - .align -hyp_reset: - b hyp_reset - - .align -hyp_undef: - bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str - - .align -hyp_svc: - bad_exception ARM_EXCEPTION_HVC, svc_die_str - - .align -hyp_pabt: - bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str - - .align -hyp_dabt: - bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str - - .align -hyp_hvc: - /* - * Getting here is either becuase of a trap from a guest or from calling - * HVC from the host kernel, which means "switch to Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. 
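[Both the new hyp-entry.S and this now-deleted handler decide "host or guest?" by inspecting the VMID, which lives in bits [55:48] of VTTBR; the host always runs with VMID 0. A hedged C rendering of the mrrc/lsr/and sequence that follows — the helper name is hypothetical:

	/* True if the trapping HVC was issued by the host (VMID == 0). */
	static bool __hyp_text hvc_from_host(void)
	{
		u64 vttbr = read_sysreg(VTTBR);

		return ((vttbr >> 48) & 0xff) == 0;
	}
]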
- - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_trap @ Guest called HVC - - /* - * Getting here means host called HVC, we shift parameters and branch - * to Hyp function. - */ - pop {r0, r1, r2} - - /* Check for __hyp_get_vectors */ - cmp r0, #-1 - mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR - beq 1f - - push {lr} - mrs lr, SPSR - push {lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {lr} - msr SPSR_csxf, lr - pop {lr} -1: eret - -guest_trap: - load_vcpu @ Load VCPU pointer to r0 - str r1, [vcpu, #VCPU_HSR] - - @ Check if we need the fault information - lsr r1, r1, #HSR_EC_SHIFT -#ifdef CONFIG_VFPv3 - cmp r1, #HSR_EC_CP_0_13 - beq switch_to_guest_vfp -#endif - cmp r1, #HSR_EC_IABT - mrceq p15, 4, r2, c6, c0, 2 @ HIFAR - beq 2f - cmp r1, #HSR_EC_DABT - bne 1f - mrc p15, 4, r2, c6, c0, 0 @ HDFAR - -2: str r2, [vcpu, #VCPU_HxFAR] - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. - */ - - /* Check for permission fault, and S1PTW */ - mrc p15, 4, r1, c5, c2, 0 @ HSR - and r0, r1, #HSR_FSC_TYPE - cmp r0, #FSC_PERM - tsteq r1, #(1 << 7) @ S1PTW - mrcne p15, 4, r2, c6, c0, 4 @ HPFAR - bne 3f - - /* Preserve PAR */ - mrrc p15, 0, r0, r1, c7 @ PAR - push {r0, r1} - - /* Resolve IPA using the xFAR */ - mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR - isb - mrrc p15, 0, r0, r1, c7 @ PAR - tst r0, #1 - bne 4f @ Failed translation - ubfx r2, r0, #12, #20 - lsl r2, r2, #4 - orr r2, r2, r1, lsl #24 - - /* Restore PAR */ - pop {r0, r1} - mcrr p15, 0, r0, r1, c7 @ PAR - -3: load_vcpu @ Load VCPU pointer to r0 - str r2, [r0, #VCPU_HPFAR] - -1: mov r1, #ARM_EXCEPTION_HVC - b __kvm_vcpu_return - -4: pop {r0, r1} @ Failed translation, return to guest - mcrr p15, 0, r0, r1, c7 @ PAR - clrex - pop {r0, r1, r2} - eret - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -switch_to_guest_vfp: - push {r3-r7} - - @ NEON/VFP used. Turn on VFP access. 
- set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) - - @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST - ldr r7, [r7] - store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST - restore_vfp_state r7 - - pop {r3-r7} - pop {r0-r2} - clrex - eret -#endif - - .align -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu @ Load VCPU pointer to r0 - b __kvm_vcpu_return - - .align -hyp_fiq: - b hyp_fiq - - .ltorg - -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end - - .section ".rodata" - -und_die_str: - .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" -pabt_die_str: - .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n" -dabt_die_str: - .ascii "unexpected data abort in Hyp mode at: %#08x\n" -svc_die_str: - .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" +ENDPROC(kvm_call_hyp) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S deleted file mode 100644 index 51a59504bef4..000000000000 --- a/arch/arm/kvm/interrupts_head.S +++ /dev/null @@ -1,648 +0,0 @@ -#include <linux/irqchip/arm-gic.h> -#include <asm/assembler.h> - -#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) -#define VCPU_USR_SP (VCPU_USR_REG(13)) -#define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) - -/* - * Many of these macros need to access the VCPU structure, which is always - * held in r0. These macros should never clobber r1, as it is used to hold the - * exception code on the return path (except of course the macro that switches - * all the registers before the final jump to the VM). - */ -vcpu .req r0 @ vcpu pointer always in r0 - -/* Clobbers {r2-r6} */ -.macro store_vfp_state vfp_base - @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions - VFPFMRX r2, FPEXC - @ Make sure VFP is enabled so we can touch the registers. - orr r6, r2, #FPEXC_EN - VFPFMXR FPEXC, r6 - - VFPFMRX r3, FPSCR - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so - @ we only need to save them if FPEXC_EX is set. - VFPFMRX r4, FPINST - tst r2, #FPEXC_FP2V - VFPFMRX r5, FPINST2, ne @ vmrsne - bic r6, r2, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r6 -1: - VFPFSTMIA \vfp_base, r6 @ Save VFP registers - stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 -.endm - -/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ -.macro restore_vfp_state vfp_base - VFPFLDMIA \vfp_base, r6 @ Load VFP registers - ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r3 - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r4 - tst r2, #FPEXC_FP2V - VFPFMXR FPINST2, r5, ne -1: - VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) -.endm - -/* These are simply for the macros to work - value don't have meaning */ -.equ usr, 0 -.equ svc, 1 -.equ abt, 2 -.equ und, 3 -.equ irq, 4 -.equ fiq, 5 - -.macro push_host_regs_mode mode - mrs r2, SP_\mode - mrs r3, LR_\mode - mrs r4, SPSR_\mode - push {r2, r3, r4} -.endm - -/* - * Store all host persistent registers on the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro save_host_regs - /* Hyp regs. 
Only ELR_hyp (SPSR_hyp already saved) */ - mrs r2, ELR_hyp - push {r2} - - /* usr regs */ - push {r4-r12} @ r0-r3 are always clobbered - mrs r2, SP_usr - mov r3, lr - push {r2, r3} - - push_host_regs_mode svc - push_host_regs_mode abt - push_host_regs_mode und - push_host_regs_mode irq - - /* fiq regs */ - mrs r2, r8_fiq - mrs r3, r9_fiq - mrs r4, r10_fiq - mrs r5, r11_fiq - mrs r6, r12_fiq - mrs r7, SP_fiq - mrs r8, LR_fiq - mrs r9, SPSR_fiq - push {r2-r9} -.endm - -.macro pop_host_regs_mode mode - pop {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all host registers from the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro restore_host_regs - pop {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - pop_host_regs_mode irq - pop_host_regs_mode und - pop_host_regs_mode abt - pop_host_regs_mode svc - - pop {r2, r3} - msr SP_usr, r2 - mov lr, r3 - pop {r4-r12} - - pop {r2} - msr ELR_hyp, r2 -.endm - -/* - * Restore SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r1, r2, r3, r4. - */ -.macro restore_guest_regs_mode mode, offset - add r1, vcpu, \offset - ldm r1, {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all guest registers from the vcpu struct. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers *all* registers. - */ -.macro restore_guest_regs - restore_guest_regs_mode svc, #VCPU_SVC_REGS - restore_guest_regs_mode abt, #VCPU_ABT_REGS - restore_guest_regs_mode und, #VCPU_UND_REGS - restore_guest_regs_mode irq, #VCPU_IRQ_REGS - - add r1, vcpu, #VCPU_FIQ_REGS - ldm r1, {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - @ Load return state - ldr r2, [vcpu, #VCPU_PC] - ldr r3, [vcpu, #VCPU_CPSR] - msr ELR_hyp, r2 - msr SPSR_cxsf, r3 - - @ Load user registers - ldr r2, [vcpu, #VCPU_USR_SP] - ldr r3, [vcpu, #VCPU_USR_LR] - msr SP_usr, r2 - mov lr, r3 - add vcpu, vcpu, #(VCPU_USR_REGS) - ldm vcpu, {r0-r12} -.endm - -/* - * Save SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs_mode mode, offset - add r2, vcpu, \offset - mrs r3, SP_\mode - mrs r4, LR_\mode - mrs r5, SPSR_\mode - stm r2, {r3, r4, r5} -.endm - -/* - * Save all guest registers to the vcpu struct - * Expects guest's r0, r1, r2 on the stack. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. 
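[These banked-mode save/restore macros are exactly what the new C world switch replaces with __banked_save_state()/__banked_restore_state(); the tail of the C version, banked-sr.c, is visible at the very top of this diff. A hedged sketch of the pattern for a single mode, using the gp_regs field aliases that the visible fragment relies on (the helper name is hypothetical; the real function covers all modes):

	/* Sketch: the SVC-mode slice of __banked_save_state(). */
	static void __hyp_text banked_save_svc(struct kvm_cpu_context *ctxt)
	{
		ctxt->gp_regs.KVM_ARM_SVC_sp   = read_special(SP_svc);
		ctxt->gp_regs.KVM_ARM_SVC_lr   = read_special(LR_svc);
		ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
	}
]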
- */ -.macro save_guest_regs - @ Store usr registers - add r2, vcpu, #VCPU_USR_REG(3) - stm r2, {r3-r12} - add r2, vcpu, #VCPU_USR_REG(0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3, r4, r5} - mrs r2, SP_usr - mov r3, lr - str r2, [vcpu, #VCPU_USR_SP] - str r3, [vcpu, #VCPU_USR_LR] - - @ Store return state - mrs r2, ELR_hyp - mrs r3, spsr - str r2, [vcpu, #VCPU_PC] - str r3, [vcpu, #VCPU_CPSR] - - @ Store other guest registers - save_guest_regs_mode svc, #VCPU_SVC_REGS - save_guest_regs_mode abt, #VCPU_ABT_REGS - save_guest_regs_mode und, #VCPU_UND_REGS - save_guest_regs_mode irq, #VCPU_IRQ_REGS -.endm - -/* Reads cp15 registers from hardware and stores them in memory - * @store_to_vcpu: If 0, registers are written in-order to the stack, - * otherwise to the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2 - r12 - */ -.macro read_cp15_state store_to_vcpu - mrc p15, 0, r2, c1, c0, 0 @ SCTLR - mrc p15, 0, r3, c1, c0, 2 @ CPACR - mrc p15, 0, r4, c2, c0, 2 @ TTBCR - mrc p15, 0, r5, c3, c0, 0 @ DACR - mrrc p15, 0, r6, r7, c2 @ TTBR 0 - mrrc p15, 1, r8, r9, c2 @ TTBR 1 - mrc p15, 0, r10, c10, c2, 0 @ PRRR - mrc p15, 0, r11, c10, c2, 1 @ NMRR - mrc p15, 2, r12, c0, c0, 0 @ CSSELR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - str r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r2, vcpu, #CP15_OFFSET(c2_TTBR0) - strd r6, r7, [r2] - add r2, vcpu, #CP15_OFFSET(c2_TTBR1) - strd r8, r9, [r2] - str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mrc p15, 0, r2, c13, c0, 1 @ CID - mrc p15, 0, r3, c13, c0, 2 @ TID_URW - mrc p15, 0, r4, c13, c0, 3 @ TID_URO - mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV - mrc p15, 0, r6, c5, c0, 0 @ DFSR - mrc p15, 0, r7, c5, c0, 1 @ IFSR - mrc p15, 0, r8, c5, c1, 0 @ ADFSR - mrc p15, 0, r9, c5, c1, 1 @ AIFSR - mrc p15, 0, r10, c6, c0, 0 @ DFAR - mrc p15, 0, r11, c6, c0, 2 @ IFAR - mrc p15, 0, r12, c12, c0, 0 @ VBAR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c13_CID)] - str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL - mrrc p15, 0, r4, r5, c7 @ PAR - mrc p15, 0, r6, c10, c3, 0 @ AMAIR0 - mrc p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \store_to_vcpu == 0 - push {r2,r4-r7} - .else - str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - strd r4, r5, [r12] - str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] - str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif -.endm - -/* - * Reads cp15 registers from memory and writes them to hardware - * @read_from_vcpu: If 0, registers are read in-order from the stack, - * otherwise from the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - */ -.macro write_cp15_state read_from_vcpu - .if \read_from_vcpu == 0 - pop {r2,r4-r7} - .else - ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - ldrd r4, r5, [r12] - ldr r6, [vcpu, 
#CP15_OFFSET(c10_AMAIR0)] - ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif - - mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL - mcrr p15, 0, r4, r5, c7 @ PAR - mcr p15, 0, r6, c10, c3, 0 @ AMAIR0 - mcr p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] - ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mcr p15, 0, r2, c13, c0, 1 @ CID - mcr p15, 0, r3, c13, c0, 2 @ TID_URW - mcr p15, 0, r4, c13, c0, 3 @ TID_URO - mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV - mcr p15, 0, r6, c5, c0, 0 @ DFSR - mcr p15, 0, r7, c5, c0, 1 @ IFSR - mcr p15, 0, r8, c5, c1, 0 @ ADFSR - mcr p15, 0, r9, c5, c1, 1 @ AIFSR - mcr p15, 0, r10, c6, c0, 0 @ DFAR - mcr p15, 0, r11, c6, c0, 2 @ IFAR - mcr p15, 0, r12, c12, c0, 0 @ VBAR - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r12, vcpu, #CP15_OFFSET(c2_TTBR0) - ldrd r6, r7, [r12] - add r12, vcpu, #CP15_OFFSET(c2_TTBR1) - ldrd r8, r9, [r12] - ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mcr p15, 0, r2, c1, c0, 0 @ SCTLR - mcr p15, 0, r3, c1, c0, 2 @ CPACR - mcr p15, 0, r4, c2, c0, 2 @ TTBCR - mcr p15, 0, r5, c3, c0, 0 @ DACR - mcrr p15, 0, r6, r7, c2 @ TTBR 0 - mcrr p15, 1, r8, r9, c2 @ TTBR 1 - mcr p15, 0, r10, c10, c2, 0 @ PRRR - mcr p15, 0, r11, c10, c2, 1 @ NMRR - mcr p15, 2, r12, c0, c0, 0 @ CSSELR -.endm - -/* - * Save the VGIC CPU state into memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro save_vgic_state - /* Get VGIC VCTRL base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* Save all interesting registers */ - ldr r4, [r2, #GICH_VMCR] - ldr r5, [r2, #GICH_MISR] - ldr r6, [r2, #GICH_EISR0] - ldr r7, [r2, #GICH_EISR1] - ldr r8, [r2, #GICH_ELRSR0] - ldr r9, [r2, #GICH_ELRSR1] - ldr r10, [r2, #GICH_APR] -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r5, r5 ) -ARM_BE8(rev r6, r6 ) -ARM_BE8(rev r7, r7 ) -ARM_BE8(rev r8, r8 ) -ARM_BE8(rev r9, r9 ) -ARM_BE8(rev r10, r10 ) - - str r4, [r11, #VGIC_V2_CPU_VMCR] - str r5, [r11, #VGIC_V2_CPU_MISR] -#ifdef CONFIG_CPU_ENDIAN_BE8 - str r6, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r7, [r11, #VGIC_V2_CPU_EISR] - str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)] - str r9, [r11, #VGIC_V2_CPU_ELRSR] -#else - str r6, [r11, #VGIC_V2_CPU_EISR] - str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r8, [r11, #VGIC_V2_CPU_ELRSR] - str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] -#endif - str r10, [r11, #VGIC_V2_CPU_APR] - - /* Clear GICH_HCR */ - mov r5, #0 - str r5, [r2, #GICH_HCR] - - /* Save list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r2], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r3], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -/* - * Restore the VGIC CPU state from memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro restore_vgic_state - /* Get VGIC VCTRL 
base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* We only restore a minimal set of registers */ - ldr r3, [r11, #VGIC_V2_CPU_HCR] - ldr r4, [r11, #VGIC_V2_CPU_VMCR] - ldr r8, [r11, #VGIC_V2_CPU_APR] -ARM_BE8(rev r3, r3 ) -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r8, r8 ) - - str r3, [r2, #GICH_HCR] - str r4, [r2, #GICH_VMCR] - str r8, [r2, #GICH_APR] - - /* Restore list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r3], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r2], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -#define CNTHCTL_PL1PCTEN (1 << 0) -#define CNTHCTL_PL1PCEN (1 << 1) - -/* - * Save the timer state onto the VCPU and allow physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro save_timer_state - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL - str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - - isb - - mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - strd r2, r3, [r5] - - @ Ensure host CNTVCT == CNTPCT - mov r2, #0 - mcrr p15, 4, r2, r2, c14 @ CNTVOFF - -1: - mov r2, #0 @ Clear ENABLE - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL - - @ Allow physical timer/counter access for the host - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL -.endm - -/* - * Load the timer state from the VCPU and deny physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro restore_timer_state - @ Disallow physical timer access for the guest - @ Physical counter access is allowed - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #CNTHCTL_PL1PCTEN - bic r2, r2, #CNTHCTL_PL1PCEN - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL - - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - ldr r2, [r4, #KVM_TIMER_CNTVOFF] - ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)] - mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF - - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - ldrd r2, r3, [r5] - mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - isb - - ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - and r2, r2, #3 - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL -1: -.endm - -.equ vmentry, 0 -.equ vmexit, 1 - -/* Configures the HSTR (Hyp System Trap Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hstr operation - mrc p15, 4, r2, c1, c1, 3 - ldr r3, =HSTR_T(15) - .if \operation == vmentry - orr r2, r2, r3 @ Trap CR{15} - .else - bic r2, r2, r3 @ Don't trap any CRx accesses - .endif - mcr p15, 4, r2, c1, c1, 3 -.endm - -/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return - * (hardware reset value is 0). Keep previous value in r2. - * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if - * VFP wasn't already enabled (always executed on vmtrap). - * If a label is specified with vmexit, it is branched to if VFP wasn't - * enabled. 
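[The save_timer_state/restore_timer_state macros above also moved to C, but their replacement is not part of this arch/arm diffstat; the __timer_save_state()/__timer_restore_state() calls in switch.c presumably resolve to shared hyp code (assumption: virt/kvm/arm/hyp/timer-sr.c). A hedged sketch of the restore half, mirroring the macro — the field names follow the KVM_TIMER_* offsets used above, the register aliases are assumptions, and the CNTHCTL PL1PCEN/PL1PCTEN adjustment shown in the macro would sit alongside this:

	static void __hyp_text timer_restore(struct kvm_vcpu *vcpu)	/* sketch */
	{
		struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
		struct kvm *kvm = kern_hyp_va(vcpu->kvm);

		if (kvm->arch.timer.enabled) {
			write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF);
			write_sysreg(timer->cntv_cval, CNTV_CVAL);
			isb();
			write_sysreg(timer->cntv_ctl, CNTV_CTL);
		}
	}
]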
- */ -.macro set_hcptr operation, mask, label = none - mrc p15, 4, r2, c1, c1, 2 - ldr r3, =\mask - .if \operation == vmentry - orr r3, r2, r3 @ Trap coproc-accesses defined in mask - .else - bic r3, r2, r3 @ Don't trap defined coproc-accesses - .endif - mcr p15, 4, r3, c1, c1, 2 - .if \operation != vmentry - .if \operation == vmexit - tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - beq 1f - .endif - isb - .if \label != none - b \label - .endif -1: - .endif -.endm - -/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hdcr operation - mrc p15, 4, r2, c1, c1, 1 - ldr r3, =(HDCR_TPM|HDCR_TPMCR) - .if \operation == vmentry - orr r2, r2, r3 @ Trap some perfmon accesses - .else - bic r2, r2, r3 @ Don't trap any perfmon accesses - .endif - mcr p15, 4, r2, c1, c1, 1 -.endm - -/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ -.macro configure_hyp_role operation - .if \operation == vmentry - ldr r2, [vcpu, #VCPU_HCR] - ldr r3, [vcpu, #VCPU_IRQ_LINES] - orr r2, r2, r3 - .else - mov r2, #0 - .endif - mcr p15, 4, r2, c1, c1, 0 @ HCR -.endm - -.macro load_vcpu - mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR -.endm diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index aba61fd3697a..58dbd5c439df 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -28,6 +28,7 @@ #include <asm/kvm_mmio.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> +#include <asm/virt.h> #include "trace.h" @@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + start = start & PAGE_MASK; end = PAGE_ALIGN(end); @@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + /* Check for a valid kernel IO mapping */ if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) return -EINVAL; @@ -1431,6 +1438,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) } /* + * Check for a cache maintenance operation. Since we + * ended-up here, we know it is outside of any memory + * slot. But we can't find out if that is for a device, + * or if the guest is just being stupid. The only thing + * we know for sure is that this range cannot be cached. + * + * So let's assume that the guest is just being + * cautious, and skip the instruction. + */ + if (kvm_vcpu_dabt_is_cm(vcpu)) { + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + ret = 1; + goto out_unlock; + } + + /* * The IPA is reported as [MAX:12], so we need to * complement it with the bottom 12 bits from the * faulting VA. This is always 12 bits, irrespective diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index eeb85858d6bb..0048b5a62a50 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -71,7 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } /* Reset core registers */ - memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); + memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs)); /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); |
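[Two small C-side details round out the mmu.c hunks above. The cache-maintenance escape hatch added to kvm_handle_guest_abort() hinges on a single syndrome bit; a hedged sketch of the predicate it calls, assumed to live in asm/kvm_emulate.h with HSR_DABT_CM as the architectural CM bit (bit 8) of the HSR:

	static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
	{
		return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
	}

And the is_kernel_in_hyp_mode() early returns in create_hyp_mappings()/create_hyp_io_mappings() cover the VHE case pulled in by this merge: when the kernel itself already runs in HYP mode, there is no separate HYP address space to populate, so the mappings are simply unnecessary.]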