author	Linus Torvalds <torvalds@linux-foundation.org>	2021-08-15 19:21:30 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-08-15 19:21:30 +0300
commit	3e763ec7914f20f55ebd9a5c087fa26e8452257e (patch)
tree	2e4edd52356b107fcf60042dfdc35e1f9d320043 /arch/arm64
parent	0aa78d17099b04fd9d36fe338af48ad6fe2d7fca (diff)
parent	6e949ddb0a6337817330c897e29ca4177c646f02 (diff)
download	linux-3e763ec7914f20f55ebd9a5c087fa26e8452257e.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM:

   - Plug race between enabling MTE and creating vcpus

   - Fix off-by-one bug when checking whether an address range is RAM

  x86:

   - Fixes for the new MMU, especially a memory leak on hosts with <39
     physical address bits

   - Remove bogus EFER.NX checks on 32-bit non-PAE hosts

   - WAITPKG fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/mmu: Protect marking SPs unsync when using TDP MMU with spinlock
  KVM: x86/mmu: Don't step down in the TDP iterator when zapping all SPTEs
  KVM: x86/mmu: Don't leak non-leaf SPTEs when zapping all SPTEs
  KVM: nVMX: Use vmx_need_pf_intercept() when deciding if L0 wants a #PF
  kvm: vmx: Sync all matching EPTPs when injecting nested EPT fault
  KVM: x86: remove dead initialization
  KVM: x86: Allow guest to set EFER.NX=1 on non-PAE 32-bit kernels
  KVM: VMX: Use current VMCS to query WAITPKG support for MSR emulation
  KVM: arm64: Fix race when enabling KVM_ARM_CAP_MTE
  KVM: arm64: Fix off-by-one in range_is_memory
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/kvm/arm.c	12
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/mem_protect.c	2
2 files changed, 9 insertions, 5 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f27792..0ca72f5cda41 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		kvm->arch.return_nisv_io_abort_to_user = true;
 		break;
 	case KVM_CAP_ARM_MTE:
-		if (!system_supports_mte() || kvm->created_vcpus)
-			return -EINVAL;
-		r = 0;
-		kvm->arch.mte_enabled = true;
+		mutex_lock(&kvm->lock);
+		if (!system_supports_mte() || kvm->created_vcpus) {
+			r = -EINVAL;
+		} else {
+			r = 0;
+			kvm->arch.mte_enabled = true;
+		}
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		r = -EINVAL;
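
The arm.c hunk above closes a check-then-act race: without kvm->lock held, a vcpu could be created between the system_supports_mte()/created_vcpus test and the write to kvm->arch.mte_enabled, so the new vcpu would be set up against a stale view of the flag. Below is a minimal user-space C sketch of the same pattern, not the kernel code itself; the fake_kvm struct, enable_mte() and create_vcpu() names are invented for illustration, with a pthread mutex standing in for kvm->lock.

/*
 * Illustrative sketch of a check-then-act guarded by a lock.
 * All names here are made up for the example; this is not KVM code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_kvm {
	pthread_mutex_t lock;	/* stands in for kvm->lock */
	int created_vcpus;	/* stands in for kvm->created_vcpus */
	bool mte_enabled;	/* stands in for kvm->arch.mte_enabled */
};

/* Enable the capability only while no vcpus exist, under the lock. */
static int enable_mte(struct fake_kvm *kvm)
{
	int r;

	pthread_mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -1;		/* a vcpu already exists, refuse */
	} else {
		kvm->mte_enabled = true;
		r = 0;
	}
	pthread_mutex_unlock(&kvm->lock);
	return r;
}

/* Vcpu creation samples mte_enabled under the same lock, so it can never
 * observe the flag flipping underneath it. */
static void create_vcpu(struct fake_kvm *kvm)
{
	pthread_mutex_lock(&kvm->lock);
	kvm->created_vcpus++;
	printf("vcpu created, mte_enabled=%d\n", kvm->mte_enabled);
	pthread_mutex_unlock(&kvm->lock);
}

int main(void)
{
	struct fake_kvm kvm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("enable before any vcpu: %d\n", enable_mte(&kvm));	/* 0 */
	create_vcpu(&kvm);
	printf("enable after a vcpu:    %d\n", enable_mte(&kvm));	/* -1 */
	return 0;
}

Compile with cc -pthread; the point is only that the enable path and the vcpu-creation path take the same lock around their check and update, which is exactly what the patch does with kvm->lock.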
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..a6ce991b1467 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
 	struct kvm_mem_range r1, r2;
 
-	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+	if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
 		return false;
 	if (r1.start != r2.start)
 		return false;
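
The mem_protect.c change accounts for the range's end being exclusive: the last byte actually covered by [start, end) is end - 1, so that is the address whose containing memory region has to be looked up, otherwise a range that ends exactly on a region boundary is misjudged. Below is a small self-contained C sketch of the off-by-one; the struct region type and the in_region() helper are invented for the example and are not the hypervisor's find_mem_range().

/*
 * Illustrative sketch of the [start, end) off-by-one; the region type and
 * helper below are made up for the example, this is not hypervisor code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t start;
	uint64_t size;
};

/* True if addr falls inside the region. */
static bool in_region(const struct region *r, uint64_t addr)
{
	return addr >= r->start && addr < r->start + r->size;
}

int main(void)
{
	struct region mem = { .start = 0x80000000, .size = 0x1000 };
	uint64_t start = 0x80000000, end = 0x80001000;	/* exactly one page */

	/* Buggy check: "end" is one past the range, so it lands outside the
	 * region even though every byte of the range is covered. */
	printf("check end:     %d\n", in_region(&mem, start) && in_region(&mem, end));

	/* Fixed check: test the last byte the range actually covers. */
	printf("check end - 1: %d\n", in_region(&mem, start) && in_region(&mem, end - 1));
	return 0;
}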