Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/arch_timer.c	45
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 6e54f3542126..98c95f2fcba4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/*
+ * Work function for handling the backup timer that we schedule when a vcpu is
+ * no longer running, but had a timer programmed to fire in the future.
+ */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
- kvm_timer_inject_irq(vcpu);
+
+ /*
+ * If the vcpu is blocked we want to wake it up so that it will see
+ * the timer has expired when entering the guest.
+ */
+ kvm_vcpu_kick(vcpu);
}
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
+bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ cycle_t cval, now;
+
+ if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
+ !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+ return false;
+
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ return cval <= now;
+}
+
/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* populate the CPU timer again.
*/
timer_disarm(timer);
+
+ /*
+ * If the timer expired while we were not scheduled, now is the time
+ * to inject it.
+ */
+ if (kvm_timer_should_fire(vcpu))
+ kvm_timer_inject_irq(vcpu);
}
/**
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
cycle_t cval, now;
u64 ns;
- if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
- return;
-
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
BUG_ON(timer_is_armed(timer));
- if (cval <= now) {
+ if (kvm_timer_should_fire(vcpu)) {
/*
* Timer has already expired while we were not
* looking. Inject the interrupt and carry on.
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
return;
}
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
&timecounter->frac);
timer_arm(timer, ns);
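
The patch above pulls the expiry test out of kvm_timer_sync_hwstate() into kvm_timer_should_fire(), so the same check can also be run on the flush path and the backup work function only needs to kick the vcpu. The following standalone sketch (ordinary user-space C, not kernel code; the struct and function names here are made up for illustration) models that predicate: the virtual timer should fire only when CNTV_CTL has ENABLE set and IMASK clear, and the compare value has been reached by the virtual count, i.e. the physical count minus CNTVOFF.

/*
 * Standalone illustration of the expiry predicate factored out as
 * kvm_timer_should_fire(). Names and values are assumptions made for
 * this sketch; only the bit layout of ENABLE/IMASK mirrors CNTV_CTL.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE  (1u << 0)  /* CNTV_CTL.ENABLE: timer enabled        */
#define CTRL_IMASK   (1u << 1)  /* CNTV_CTL.IMASK: interrupt masked      */

struct vtimer_state {
	uint32_t cntv_ctl;   /* control register              */
	uint64_t cntv_cval;  /* compare value                 */
	uint64_t cntvoff;    /* virtual counter offset        */
};

static bool vtimer_should_fire(const struct vtimer_state *t, uint64_t physcnt)
{
	/* Guest-visible virtual count = physical count - CNTVOFF. */
	uint64_t now = physcnt - t->cntvoff;

	/* Masked or disabled timers never fire. */
	if ((t->cntv_ctl & CTRL_IMASK) || !(t->cntv_ctl & CTRL_ENABLE))
		return false;

	/* The condition is met once the count reaches the compare value. */
	return t->cntv_cval <= now;
}

int main(void)
{
	struct vtimer_state t = {
		.cntv_ctl  = CTRL_ENABLE,
		.cntv_cval = 1000,
		.cntvoff   = 200,
	};

	/* Physical count 1100 -> virtual count 900: not yet expired. */
	printf("%d\n", vtimer_should_fire(&t, 1100));
	/* Physical count 1300 -> virtual count 1100: expired, should fire. */
	printf("%d\n", vtimer_should_fire(&t, 1300));
	return 0;
}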