| author | Marc Zyngier <marc.zyngier@arm.com> | 2017-06-15 11:35:15 +0300 |
|---|---|---|
| committer | Marc Zyngier <marc.zyngier@arm.com> | 2017-06-15 11:35:15 +0300 |
| commit | 6f2f10cabe73944488a62df16695c86e20d4c3f9 (patch) | |
| tree | 514fa4c6127892ed55d5a00611ce95555c3ce011 /arch | |
| parent | ebb127f2d6f32665643165289151bd20929d9931 (diff) | |
| parent | 33b5c38852b29736f3b472dd095c9a18ec22746f (diff) | |
| download | linux-6f2f10cabe73944488a62df16695c86e20d4c3f9.tar.xz | |
Merge branch 'kvmarm-master/master' into HEAD
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/include/asm/kvm_coproc.h | 3 |
-rw-r--r-- | arch/arm/kvm/coproc.c | 106 |
-rw-r--r-- | arch/arm/kvm/handle_exit.c | 4 |
-rw-r--r-- | arch/arm/kvm/hyp/Makefile | 2 |
-rw-r--r-- | arch/arm/kvm/hyp/switch.c | 4 |
-rw-r--r-- | arch/arm/kvm/init.S | 5 |
-rw-r--r-- | arch/arm/kvm/trace.h | 8 |
-rw-r--r-- | arch/arm64/include/asm/sysreg.h | 4 |
-rw-r--r-- | arch/arm64/kvm/hyp-init.S | 11 |
-rw-r--r-- | arch/arm64/kvm/hyp/Makefile | 2 |
-rw-r--r-- | arch/arm64/kvm/vgic-sys-reg-v3.c | 10 |
11 files changed, 107 insertions, 52 deletions
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f7e459..e74ab0fbab79 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 2c14b69511e9..6d1d2e26dfe5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -32,6 +32,7 @@
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
+#define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "coproc.h"
 
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  * all PM registers, which doesn't crash the guest kernel at least.
  */
-static bool pm_fake(struct kvm_vcpu *vcpu,
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 		    const struct coproc_params *p,
 		    const struct coproc_reg *r)
 {
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 	return read_zero(vcpu, p);
 }
 
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
 
 /* Architected CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 		table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index a2b4f7b82356..54442e375354 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -96,9 +96,9 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb530edf..8679405b0b2b 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index c8f15bb5c8b3..ebd2dd46adf7 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR;		/* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;	/* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a9c261..5386528665b5 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ARM(	ldr	r2, =(HSCTLR_M)					)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index fc0943776db2..b0d10648c486 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -1,5 +1,5 @@
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
 
 #include <linux/tracepoint.h>
 
@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142ce991c..b4d13d9267ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c24b1c..3f9615582377 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae8d8c3..14c4e3b14bcb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e37d367..6260b69e5622 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				     ICC_BPR1_EL1_SHIFT;
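For readers who want to poke at the trap decoding outside the kernel, below is a hedged, standalone sketch of what decode_32bit_hsr() in the coproc.c hunks does. Only the Op2 and Rt2 extractions appear in the hunks above; the other field positions are filled in from the ARMv7 HSR ISS layout for mcr/mrc traps and should be treated as assumptions, as should the sample syndrome value.

/*
 * Standalone illustration, not kernel code: mimics decode_32bit_hsr()
 * from the coproc.c hunks above. Field positions other than Op2/Rt2 are
 * assumed from the ARMv7 HSR ISS encoding for mcr/mrc traps; the sample
 * syndrome value in main() is made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct coproc_params {
	unsigned long CRn, CRm, Op1, Op2, Rt1, Rt2;
	bool is_write, is_64bit;
};

static struct coproc_params decode_32bit_hsr(uint32_t hsr)
{
	struct coproc_params params;

	params.CRm      = (hsr >> 1) & 0xf;	/* assumed: ISS[4:1] */
	params.Rt1      = (hsr >> 5) & 0xf;	/* assumed: ISS[9:5] */
	params.is_write = ((hsr & 1) == 0);	/* assumed: bit 0 = direction */
	params.is_64bit = false;
	params.CRn      = (hsr >> 10) & 0xf;	/* assumed: ISS[13:10] */
	params.Op1      = (hsr >> 14) & 0x7;	/* assumed: ISS[16:14] */
	params.Op2      = (hsr >> 17) & 0x7;	/* as in the hunk above */
	params.Rt2      = 0;			/* as in the hunk above */

	return params;
}

int main(void)
{
	uint32_t hsr = 0x0005f01aU;	/* made-up example syndrome */
	struct coproc_params p = decode_32bit_hsr(hsr);

	printf("CRn=%lu CRm=%lu Op1=%lu Op2=%lu Rt=%lu dir=%s\n",
	       p.CRn, p.CRm, p.Op1, p.Op2, p.Rt1,
	       p.is_write ? "write" : "read");
	return 0;
}

A CP14 trap routed through the new kvm_handle_cp14_32() feeds a structure like this to trap_raz_wi() and then skips the trapped instruction, which is why guest debug-register accesses read as zero / are ignored rather than triggering an undefined-instruction exception.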