Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp/switch.c    |  6
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c |  6
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c       | 11
-rw-r--r--  arch/arm64/kvm/reset.c         | 65
-rw-r--r--  arch/arm64/kvm/sys_regs.c      |  6
5 files changed, 66 insertions(+), 28 deletions(-)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 8a1e81a400e0..1336e6f0acdf 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -138,7 +138,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
write_sysreg(val, cptr_el2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
isb();
@@ -181,7 +181,7 @@ static void deactivate_traps_vhe(void)
* above before we can switch to the EL2/EL0 translation regime used by
* the host.
*/
- asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
@@ -192,7 +192,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
/*
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 6d2df9fe0b5d..ea5d22fbdacf 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -107,7 +107,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (has_vhe() ||
+ !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
} else if (!ctxt->__hyp_running_vcpu) {
@@ -138,7 +139,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+ if (!has_vhe() &&
+ cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
ctxt->__hyp_running_vcpu) {
/*
* Must only be done for host registers, hence the context
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index ceaddbe4279f..d063a576d511 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
local_irq_save(cxt->flags);
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/*
* For CPUs that are affected by ARM errata 1165522 or 1530923,
* we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
/*
@@ -79,8 +79,9 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
isb();
}
+ /* __load_guest_stage2() includes an ISB for the workaround. */
__load_guest_stage2(kvm);
- isb();
+ asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
@@ -103,7 +104,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
isb();
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Restore the registers to what they were */
write_sysreg_el1(cxt->tcr, SYS_TCR);
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +118,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
{
write_sysreg(0, vttbr_el2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Ensure write of the host VMID */
isb();
/* Restore the host's TCR_EL1 */
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 30b7ea680f66..70cd7bcca433 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -46,14 +46,6 @@ static const struct kvm_regs default_regs_reset32 = {
PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};
-static bool cpu_has_32bit_el1(void)
-{
- u64 pfr0;
-
- pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- return !!(pfr0 & 0x20);
-}
-
/**
* kvm_arch_vm_ioctl_check_extension
*
@@ -66,7 +58,7 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
switch (ext) {
case KVM_CAP_ARM_EL1_32BIT:
- r = cpu_has_32bit_el1();
+ r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
break;
case KVM_CAP_GUEST_DEBUG_HW_BPS:
r = get_num_brps();
@@ -288,7 +280,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpu_has_32bit_el1())
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
goto out;
cpu_reset = &default_regs_reset32;
} else {
@@ -340,11 +332,50 @@ out:
return ret;
}
-void kvm_set_ipa_limit(void)
+u32 get_kvm_ipa_limit(void)
+{
+ return kvm_ipa_limit;
+}
+
+int kvm_set_ipa_limit(void)
{
- unsigned int ipa_max, pa_max, va_max, parange;
+ unsigned int ipa_max, pa_max, va_max, parange, tgran_2;
+ u64 mmfr0;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_PARANGE_SHIFT);
+
+ /*
+ * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
+ * Stage-2. If not, things will stop very quickly.
+ */
+ switch (PAGE_SIZE) {
+ default:
+ case SZ_4K:
+ tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
+ break;
+ case SZ_16K:
+ tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
+ break;
+ case SZ_64K:
+ tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
+ break;
+ }
+
+ switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
+ default:
+ case 1:
+ kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
+ return -EINVAL;
+ case 0:
+ kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
+ break;
+ case 2:
+ kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
+ break;
+ }
- parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
/* Clamp the IPA limit to the PA size supported by the kernel */
@@ -378,6 +409,8 @@ void kvm_set_ipa_limit(void)
"KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
kvm_ipa_limit = ipa_max;
kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
+
+ return 0;
}
/*
@@ -390,7 +423,7 @@ void kvm_set_ipa_limit(void)
*/
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
- u64 vtcr = VTCR_EL2_FLAGS;
+ u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
u32 parange, phys_shift;
u8 lvls;
@@ -406,7 +439,9 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
phys_shift = KVM_PHYS_SHIFT;
}
- parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_PARANGE_SHIFT);
if (parange > ID_AA64MMFR0_PARANGE_MAX)
parange = ID_AA64MMFR0_PARANGE_MAX;
vtcr |= parange << VTCR_EL2_PS_SHIFT;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 51db934702b6..7d7a39b01135 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1456,9 +1456,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_SANITISED(MVFR1_EL1),
ID_SANITISED(MVFR2_EL1),
ID_UNALLOCATED(3,3),
- ID_UNALLOCATED(3,4),
- ID_UNALLOCATED(3,5),
- ID_UNALLOCATED(3,6),
+ ID_SANITISED(ID_PFR2_EL1),
+ ID_HIDDEN(ID_DFR1_EL1),
+ ID_SANITISED(ID_MMFR5_EL1),
ID_UNALLOCATED(3,7),
/* AArch64 ID registers */