author		James Hogan <james.hogan@imgtec.com>	2016-07-08 13:53:25 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-08-01 19:42:24 +0300
commit		0d17aea5c27d7d748b1d8116d275b2b17dc5cad6 (patch)
tree		39616859bb440c62c4421ff3ad4e56532492ec89 /arch
parent		1d756942533b2330d8929dd0ea61a81a5d020196 (diff)
download	linux-0d17aea5c27d7d748b1d8116d275b2b17dc5cad6.tar.xz
MIPS: KVM: Use 64-bit CP0_EBase when appropriate
Update the KVM entry point to write CP0_EBase as a 64-bit register when
it is 64 bits wide, and to set the WG (write gate) bit if it exists, in
order to write bits 63:30 (or 31:30 on MIPS32).
Prior to MIPS64r6 it was UNDEFINED to perform a 64-bit read or write of
a 32-bit COP0 register. Since this is dynamically generated code,
generate the right type of access depending on whether the kernel is
64-bit and cpu_has_ebase_wg.
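
For illustration only, a minimal sketch (not part of the patch) of the access-size selection that the new build_set_exc_base() helper in the diff below assembles, assuming UASM_i_MTC0() expands to uasm_i_dmtc0() on 64-bit kernels and to uasm_i_mtc0() on 32-bit kernels per the uasm headers; the fragment relies on the surrounding uasm code-buffer context (p, reg):

	/* Sketch only: emit an EBase write of the appropriate width. */
	if (cpu_has_ebase_wg) {
		/* WG exists: set it so bits 63:30 (31:30 on MIPS32) get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);	/* dmtc0 on 64-bit, mtc0 on 32-bit */
	} else {
		/* No WG: only a plain 32-bit mtc0 access is emitted */
		uasm_i_mtc0(p, reg, C0_EBASE);
	}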
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/kvm/entry.c | 25
1 file changed, 22 insertions, 3 deletions
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index c824bfc4daa0..6a02b3a3fa65 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -153,6 +153,25 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
 }
 
 /**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p:		Code buffer pointer.
+ * @reg:	Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+	if (cpu_has_ebase_wg) {
+		/* Set WG so that all the bits get written */
+		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+		UASM_i_MTC0(p, reg, C0_EBASE);
+	} else {
+		uasm_i_mtc0(p, reg, C0_EBASE);
+	}
+}
+
+/**
  * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
  * @addr:	Address to start writing code.
  *
@@ -216,7 +235,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
 
 	/* load up the new EBASE */
 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
-	uasm_i_mtc0(&p, K0, C0_EBASE);
+	build_set_exc_base(&p, K0);
 
 	/*
 	 * Now that the new EBASE has been loaded, unset BEV, set
@@ -463,7 +482,7 @@ void *kvm_mips_build_exit(void *addr)
 
 	UASM_i_LA_mostly(&p, K0, (long)&ebase);
 	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
-	uasm_i_mtc0(&p, K0, C0_EBASE);
+	build_set_exc_base(&p, K0);
 
 	if (raw_cpu_has_fpu) {
 		/*
@@ -620,7 +639,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 	uasm_i_or(&p, K0, V1, AT);
 	uasm_i_mtc0(&p, K0, C0_STATUS);
 	uasm_i_ehb(&p);
-	uasm_i_mtc0(&p, T0, C0_EBASE);
+	build_set_exc_base(&p, T0);
 
 	/* Setup status register for running guest in UM */
 	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);