author		Roman Kagan <rkagan@virtuozzo.com>	2018-12-10 21:47:27 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-12-14 19:59:51 +0300
commit		7deec5e0df741102c9b54156c88cd1476d272ae5 (patch)
tree		e588aa5a3edb0177df314c3afe836dc376562f2b /arch/x86/kvm/hyperv.c
parent		3a0e7731724f6ac684129f1763eef4d2d3e7dec7 (diff)
download	linux-7deec5e0df741102c9b54156c88cd1476d272ae5.tar.xz
x86: kvm: hyperv: don't retry message delivery for periodic timers
The SynIC message delivery protocol allows the message originator to request, should the message slot be busy, to be notified when it's free.

However, this is unnecessary and even undesirable for messages generated by SynIC timers in periodic mode: if the period is short compared to the time the guest spends in the timer interrupt handler, so that timer ticks start piling up, the extra interactions due to this notification and the retried message delivery only make things worse.

[This was observed, in particular, with Windows L2 guests temporarily setting the periodic timer to 2 kHz and spending hundreds of microseconds in the timer interrupt handler due to several L2->L1 exits; under some load in L0 the handler could exceed 500 us, i.e. one full timer period at 2 kHz, so the timer ticks started to pile up and the guest livelocked.]

Relieve the situation somewhat by not retrying message delivery for periodic SynIC timers. This appears to remain within the "lazy" lost-ticks policy for SynIC timers as implemented in KVM.

Note that this doesn't solve the fundamental problem of livelocking the guest with a periodic timer whose period is smaller than the time needed to process a tick; it only makes the livelock a bit less likely to trigger.

Signed-off-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
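For readers unfamiliar with the handshake described above, here is a minimal user-space model of the SynIC message-slot protocol and of the no-retry behavior this patch adds. It is an illustrative sketch, not kernel code: the struct layout is simplified from the hyperv-tlfs.h definitions, and deliver_msg() is a hypothetical stand-in for KVM's synic_deliver_msg().

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define HVMSG_NONE		0x0		/* slot is free */
#define HVMSG_TIMER_EXPIRED	0x80000010	/* timer tick message */

/* Simplified per-SINT message slot in the guest's message page. */
struct hv_msg_slot {
	uint32_t message_type;
	uint8_t  msg_pending;	/* originator asks for an EOM notification */
};

/*
 * Model of the delivery path: if the slot is busy, either silently drop
 * the message (no_retry, as this patch does for periodic timers) or set
 * msg_pending so the guest's EOM write triggers a re-send.
 */
static int deliver_msg(struct hv_msg_slot *slot,
		       const struct hv_msg_slot *src, bool no_retry)
{
	if (slot->message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;	/* "lazy" policy: the tick is lost */
		slot->msg_pending = 1;
		return -EAGAIN;		/* caller retries after guest EOM */
	}
	*slot = *src;
	return 0;
}

int main(void)
{
	struct hv_msg_slot slot = { .message_type = HVMSG_TIMER_EXPIRED };
	struct hv_msg_slot tick = { .message_type = HVMSG_TIMER_EXPIRED };

	/* Periodic timer hitting a busy slot: the tick is dropped, not retried. */
	return deliver_msg(&slot, &tick, true);
}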
Diffstat (limited to 'arch/x86/kvm/hyperv.c')
-rw-r--r--	arch/x86/kvm/hyperv.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 17d04d2c6d4f..ded4b0c90984 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -557,7 +557,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
- struct hv_message *src_msg)
+ struct hv_message *src_msg, bool no_retry)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
@@ -584,6 +584,9 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
return r;

if (hv_hdr.message_type != HVMSG_NONE) {
+ if (no_retry)
+ return 0;
+
hv_hdr.message_flags.msg_pending = 1;
r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
&hv_hdr.message_flags,
@@ -617,10 +620,17 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
struct hv_message *msg = &stimer->msg;
struct hv_timer_message_payload *payload =
(struct hv_timer_message_payload *)&msg->u.payload;
+ /*
+ * To avoid piling up periodic ticks, don't retry message
+ * delivery for them (within "lazy" lost ticks policy).
+ */
+ bool no_retry = stimer->config & HV_STIMER_PERIODIC;
+
payload->expiration_time = stimer->exp_time;
payload->delivery_time = get_time_ref_counter(vcpu->kvm);
return synic_deliver_msg(vcpu_to_synic(vcpu),
- HV_STIMER_SINT(stimer->config), msg);
+ HV_STIMER_SINT(stimer->config), msg,
+ no_retry);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
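Why returning 0 for a busy slot suffices: the timer-expiration path treats a zero return as successful delivery and clears its pending flag, so a dropped periodic tick is simply forgotten rather than re-sent after the next EOM. Continuing the user-space sketch above (stimer_expiration() in KVM behaves analogously; the names below belong to the model, not the kernel):

/*
 * Expiration side of the model: mark the message pending before
 * sending, and clear the mark on any zero return, so a deliberately
 * dropped periodic tick is never re-sent.
 */
struct stimer_model {
	struct hv_msg_slot msg;
	bool msg_pending;
	bool periodic;
};

static void stimer_expiration_model(struct stimer_model *t,
				    struct hv_msg_slot *slot)
{
	t->msg_pending = true;
	if (deliver_msg(slot, &t->msg, t->periodic) == 0)
		t->msg_pending = false;	/* delivered, or dropped on purpose */
}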