author    Sean Christopherson <seanjc@google.com>    2021-10-09 05:11:56 +0300
committer Paolo Bonzini <pbonzini@redhat.com>        2021-12-08 12:24:46 +0300
commit    6f390916c4fb359507d9ac4bf1b28a4f8abee5c0 (patch)
tree      e0966bd41f639b4b128a11666bfc8b14459f30f2 /virt/kvm/kvm_main.c
parent    91b01895071770ed0c256869d0f94d69a2fb8ecf (diff)
download  linux-6f390916c4fb359507d9ac4bf1b28a4f8abee5c0.tar.xz
KVM: s390: Ensure kvm_arch_no_poll() is read once when blocking vCPU
Wrap s390's halt_poll_max_steal with READ_ONCE and snapshot the result of
kvm_arch_no_poll() in kvm_vcpu_block() to avoid a mostly-theoretical, largely
benign bug on s390 where the result of kvm_arch_no_poll() could change due to
userspace modifying halt_poll_max_steal while the vCPU is blocking. The bug is
largely benign as it will either cause KVM to skip updating halt-polling times
(no_poll toggles false=>true) or to update halt-polling times with a slightly
flawed block_ns.

Note, READ_ONCE is unnecessary in the current code, add it in case the arch
hook is ever inlined, and to provide a hint that userspace can change the
param at will.

Fixes: 8b905d28ee17 ("KVM: s390: provide kvm_arch_no_poll function")
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
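The pattern at the heart of this fix is "read the userspace-tunable value once,
then act on the snapshot", so the pre-block check and the post-block bookkeeping
cannot disagree. The following userspace C sketch illustrates that idea only; it
is not the kernel code. The names halt_poll_tunable, no_poll() and
vcpu_block_once() are hypothetical stand-ins, the READ_ONCE macro here is a
minimal volatile-cast approximation of the kernel's, and the tuner thread merely
stands in for userspace flipping a module parameter at will.

/* Minimal sketch of the snapshot-once pattern (assumed names, not kernel code). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Crude stand-in for the kernel's READ_ONCE(): force a single, non-folded load. */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int halt_poll_tunable = 10;	/* hypothetical tunable, writable "at will" */

static bool no_poll(void)
{
	/* Read the tunable exactly once per call, as the fixed arch hook does. */
	return READ_ONCE(halt_poll_tunable) == 0;
}

static void vcpu_block_once(void)
{
	/* Snapshot the decision up front so it cannot flip while "blocking". */
	bool halt_poll_allowed = !no_poll();

	if (halt_poll_allowed)
		printf("poll before blocking\n");

	usleep(500);	/* stand-in for the actual block/wait */

	/* Same snapshot reused: accounting always matches the earlier decision. */
	if (halt_poll_allowed)
		printf("update halt-poll window\n");
	else
		printf("skip halt-poll accounting\n");
}

static void *tuner(void *arg)
{
	(void)arg;
	/* Stand-in for userspace toggling the parameter while vCPUs block. */
	for (int i = 0; i < 5; i++) {
		halt_poll_tunable = (i & 1) ? 0 : 10;
		usleep(200);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, tuner, NULL);
	for (int i = 0; i < 5; i++)
		vcpu_block_once();
	pthread_join(t, NULL);
	return 0;
}

Without the snapshot, the two checks could read different values of the tunable
and, for example, poll before blocking but then skip the halt-poll statistics
update, which is exactly the (benign) inconsistency the commit closes off.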
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e9990c4c6e40..a26b069a6929 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3284,6 +3284,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
  */
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
+	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	ktime_t start, cur, poll_end;
 	bool waited = false;
 	u64 block_ns;
@@ -3291,7 +3292,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_blocking(vcpu);
 
 	start = cur = poll_end = ktime_get();
-	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
+	if (vcpu->halt_poll_ns && halt_poll_allowed) {
 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
 		++vcpu->stat.generic.halt_attempted_poll;
@@ -3346,7 +3347,7 @@ out:
 	update_halt_poll_stats(
 			vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
 
-	if (!kvm_arch_no_poll(vcpu)) {
+	if (halt_poll_allowed) {
 		if (!vcpu_valid_wakeup(vcpu)) {
 			shrink_halt_poll_ns(vcpu);
 		} else if (vcpu->kvm->max_halt_poll_ns) {