author     Oliver Upton <oliver.upton@linux.dev>    2023-02-14 02:30:37 +0300
committer  Oliver Upton <oliver.upton@linux.dev>    2023-02-14 02:30:37 +0300
commit     3f1a14af5e17a8d03b2db2085861da68c8405ee3
tree       a18db260f237987d8f6e8fc471c50633760c2906  /arch/arm64
parent     1b915210d9bed44512f6034eacda9e8dcd89bd93
parent     6f10f2ec61c7968b44428c551b1b06a4c5e4e544
Merge branch kvm-arm64/psci-relay-fixes into kvmarm/next
* kvm-arm64/psci-relay-fixes:
: Fixes for CPU on/resume with pKVM, courtesy of Quentin Perret.
:
: A consequence of deprivileging the host is that pKVM relays PSCI calls
: on behalf of the host. pKVM's CPU initialization failed to fully
: initialize the CPU's EL2 state, which notably led to unexpected SVE
: traps resulting in a hyp panic.
:
: The issue is addressed by reusing parts of __finalise_el2 to restore CPU
: state in the PSCI relay.
KVM: arm64: Finalise EL2 state from pKVM PSCI relay
KVM: arm64: Use sanitized values in __check_override in nVHE
KVM: arm64: Introduce finalise_el2_state macro
KVM: arm64: Provide sanitized SYS_ID_AA64SMFR0_EL1 to nVHE
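
The heart of the rework is that check_override now comes in two flavours: outside
the hypervisor it reads the live ID register and honours the kernel's idreg
override, while nVHE hyp code consults the sanitized copy the host exports at
init (the override state is not accessible at EL2). As a rough C rendering of
the two paths -- the struct layout and helper names below are illustrative
assumptions, not kernel API; the authoritative version is the assembly macro in
the diff that follows:

/*
 * Illustrative sketch of the two __check_override flavours. The struct
 * and helper names are assumptions for exposition only.
 */
#include <stdbool.h>
#include <stdint.h>

struct ftr_override {
	uint64_t val;	/* what FTR_OVR_VAL_OFFSET points at */
	uint64_t mask;	/* what FTR_OVR_MASK_OFFSET points at */
};

/* Extract an ID register field, as the ubfx instructions do. */
static uint64_t field(uint64_t reg, unsigned int shift, unsigned int width)
{
	return (reg >> shift) & ((1ULL << width) - 1);
}

/* Host flavour: read the HW value, then honour any idreg override. */
static bool check_override_host(uint64_t hw_val,
				const struct ftr_override *ovr,
				unsigned int shift, unsigned int width)
{
	uint64_t mask, val;

	if (!field(hw_val, shift, width))
		return false;		/* feature absent in hardware */

	mask = field(ovr->mask, shift, width);
	val = field(ovr->val, shift, width);

	/* csinv: an empty override mask means "keep the HW value". */
	return mask ? (val & mask) != 0 : true;
}

/* nVHE hyp flavour: trust the sanitized value the host exported. */
static bool check_override_nvhe(uint64_t sanitized_sys_val,
				unsigned int shift, unsigned int width)
{
	return field(sanitized_sys_val, shift, width) != 0;
}

Either flavour reduces to "is this ID register field non-zero once policy is
applied", which is what lets a single finalise_el2_state macro be shared
between the hyp-stub and the pKVM PSCI relay path.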
Diffstat (limited to 'arch/arm64')

 arch/arm64/include/asm/el2_setup.h | 99
 arch/arm64/include/asm/kvm_hyp.h   |  1
 arch/arm64/kernel/hyp-stub.S       | 86
 arch/arm64/kvm/arm.c               |  1
 arch/arm64/kvm/hyp/nvhe/hyp-init.S |  1
 arch/arm64/kvm/hyp/nvhe/sys_regs.c |  1

 6 files changed, 105 insertions(+), 84 deletions(-)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 668569adf4d3..ea78c095a9c7 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -196,4 +196,103 @@
 	__init_el2_nvhe_prepare_eret
 .endm
 
+#ifndef __KVM_NVHE_HYPERVISOR__
+// This will clobber tmp1 and tmp2, and expect tmp1 to contain
+// the id register value as read from the HW
+.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
+	ubfx	\tmp1, \tmp1, #\fld, #\width
+	cbz	\tmp1, \fail
+
+	adr_l	\tmp1, \idreg\()_override
+	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
+	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
+	ubfx	\tmp2, \tmp2, #\fld, #\width
+	ubfx	\tmp1, \tmp1, #\fld, #\width
+	cmp	\tmp1, xzr
+	and	\tmp2, \tmp2, \tmp1
+	csinv	\tmp2, \tmp2, xzr, ne
+	cbnz	\tmp2, \pass
+	b	\fail
+.endm
+
+// This will clobber tmp1 and tmp2
+.macro check_override idreg, fld, pass, fail, tmp1, tmp2
+	mrs	\tmp1, \idreg\()_el1
+	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
+.endm
+#else
+// This will clobber tmp
+.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
+	ldr_l	\tmp, \idreg\()_el1_sys_val
+	ubfx	\tmp, \tmp, #\fld, #\width
+	cbnz	\tmp, \pass
+	b	\fail
+.endm
+
+.macro check_override idreg, fld, pass, fail, tmp, ignore
+	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
+.endm
+#endif
+
+.macro finalise_el2_state
+	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
+
+.Linit_sve_\@:	/* SVE register access */
+	mrs	x0, cptr_el2			// Disable SVE traps
+	bic	x0, x0, #CPTR_EL2_TZ
+	msr	cptr_el2, x0
+	isb
+	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
+	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
+
+.Lskip_sve_\@:
+	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2
+
+.Linit_sme_\@:	/* SME register access and priority mapping */
+	mrs	x0, cptr_el2			// Disable SME traps
+	bic	x0, x0, #CPTR_EL2_TSM
+	msr	cptr_el2, x0
+	isb
+
+	mrs	x1, sctlr_el2
+	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
+	msr	sctlr_el2, x1
+	isb
+
+	mov	x0, #0				// SMCR controls
+
+	// Full FP in SM?
+	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
+	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2
+
+.Linit_sme_fa64_\@:
+	orr	x0, x0, SMCR_ELx_FA64_MASK
+.Lskip_sme_fa64_\@:
+
+	// ZT0 available?
+	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
+	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
+.Linit_sme_zt0_\@:
+	orr	x0, x0, SMCR_ELx_EZT0_MASK
+.Lskip_sme_zt0_\@:
+
+	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
+	msr_s	SYS_SMCR_EL2, x0		// length for EL1.
+
+	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
+	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
+	cbz	x1, .Lskip_sme_\@
+
+	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
+
+	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
+	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
+	cbz	x1, .Lskip_sme_\@
+
+	mrs_s	x1, SYS_HCRX_EL2
+	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
+	msr_s	SYS_HCRX_EL2, x1
+.Lskip_sme_\@:
+.endm
+
 #endif /* __ARM_KVM_INIT_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 6797eafe7890..bdd9cf546d95 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -122,6 +122,7 @@ extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
 
 extern unsigned long kvm_nvhe_sym(__icache_flags);
 extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 111ff33d93ee..9439240c3fcf 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -16,30 +16,6 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
-// Warning, hardcoded register allocation
-// This will clobber x1 and x2, and expect x1 to contain
-// the id register value as read from the HW
-.macro __check_override idreg, fld, width, pass, fail
-	ubfx	x1, x1, #\fld, #\width
-	cbz	x1, \fail
-
-	adr_l	x1, \idreg\()_override
-	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
-	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
-	ubfx	x2, x2, #\fld, #\width
-	ubfx	x1, x1, #\fld, #\width
-	cmp	x1, xzr
-	and	x2, x2, x1
-	csinv	x2, x2, xzr, ne
-	cbnz	x2, \pass
-	b	\fail
-.endm
-
-.macro check_override idreg, fld, pass, fail
-	mrs	x1, \idreg\()_el1
-	__check_override \idreg \fld 4 \pass \fail
-.endm
-
 .text
 .pushsection	.hyp.text, "ax"
 
@@ -98,65 +74,7 @@ SYM_CODE_START_LOCAL(elx_sync)
 SYM_CODE_END(elx_sync)
 
 SYM_CODE_START_LOCAL(__finalise_el2)
-	check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
-
-.Linit_sve:	/* SVE register access */
-	mrs	x0, cptr_el2			// Disable SVE traps
-	bic	x0, x0, #CPTR_EL2_TZ
-	msr	cptr_el2, x0
-	isb
-	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
-	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
-
-.Lskip_sve:
-	check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
-
-.Linit_sme:	/* SME register access and priority mapping */
-	mrs	x0, cptr_el2			// Disable SME traps
-	bic	x0, x0, #CPTR_EL2_TSM
-	msr	cptr_el2, x0
-	isb
-
-	mrs	x1, sctlr_el2
-	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
-	msr	sctlr_el2, x1
-	isb
-
-	mov	x0, #0				// SMCR controls
-
-	// Full FP in SM?
-	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
-	__check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64
-
-.Linit_sme_fa64:
-	orr	x0, x0, SMCR_ELx_FA64_MASK
-.Lskip_sme_fa64:
-
-	// ZT0 available?
-	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
-	__check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0
-.Linit_sme_zt0:
-	orr	x0, x0, SMCR_ELx_EZT0_MASK
-.Lskip_sme_zt0:
-
-	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
-	msr_s	SYS_SMCR_EL2, x0		// length for EL1.
-
-	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
-	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
-	cbz	x1, .Lskip_sme
-
-	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
-
-	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
-	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
-	cbz	x1, .Lskip_sme
-
-	mrs_s	x1, SYS_HCRX_EL2
-	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
-	msr_s	SYS_HCRX_EL2, x1
-
-.Lskip_sme:
+	finalise_el2_state
 
 	// nVHE? No way! Give me the real thing!
 	// Sanity check: MMU *must* be off
@@ -164,7 +82,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	tbnz	x1, #0, 1f
 
 	// Needs to be VHE capable, obviously
-	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
+	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f x1 x2
 
 1:	mov_q	x0, HVC_STUB_ERR
 	eret
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 8cda276c7ce1..69d99951768b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1899,6 +1899,7 @@ static void kvm_hyp_init_symbols(void)
 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
+	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
 	kvm_nvhe_sym(__icache_flags) = __icache_flags;
 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index c953fb4b9a13..a6d67c2bb5ae 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -183,6 +183,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 
 	/* Initialize EL2 CPU state to sane values. */
 	init_el2_state				// Clobbers x0..x2
+	finalise_el2_state
 
 	/* Enable MMU, set vectors and stack. */
 	mov	x0, x28
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 0f9ac25afdf4..08d2b004f4b7 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -26,6 +26,7 @@ u64 id_aa64isar2_el1_sys_val;
 u64 id_aa64mmfr0_el1_sys_val;
 u64 id_aa64mmfr1_el1_sys_val;
 u64 id_aa64mmfr2_el1_sys_val;
+u64 id_aa64smfr0_el1_sys_val;
 
 /*
  * Inject an unknown/undefined exception to an AArch64 guest while most of its