Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/hyperv.c | 10
-rw-r--r-- | arch/x86/kvm/trace.h  | 93
2 files changed, 98 insertions, 5 deletions
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 101c2e4a0255..2d83d4598507 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -152,7 +152,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 	struct kvm_vcpu_hv_stimer *stimer;
 	int gsi, idx, stimers_pending;
 
-	vcpu_debug(vcpu, "Hyper-V SynIC acked sint %d\n", sint);
+	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
 
 	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
 		synic_clear_sint_msg_pending(synic, sint);
@@ -202,8 +202,8 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 	if (!synic->active)
 		return 1;
 
-	vcpu_debug(vcpu, "Hyper-V SynIC set msr 0x%x 0x%llx host %d\n",
-		   msr, data, host);
+	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
+
 	ret = 0;
 	switch (msr) {
 	case HV_X64_MSR_SCONTROL:
@@ -312,7 +312,7 @@ int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
 	irq.level = 1;
 
 	ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
-	vcpu_debug(vcpu, "Hyper-V SynIC set irq ret %d\n", ret);
+	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
 	return ret;
 }
 
@@ -332,7 +332,7 @@ void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 	int i;
 
-	vcpu_debug(vcpu, "Hyper-V SynIC send eoi vec %d\n", vector);
+	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
 
 	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
 		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index ab9ae67a80e4..4be350003bce 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1025,6 +1025,99 @@ TRACE_EVENT(kvm_pi_irte_update,
 		  __entry->pi_desc_addr)
 );
 
+/*
+ * Tracepoint for kvm_hv_notify_acked_sint.
+ */
+TRACE_EVENT(kvm_hv_notify_acked_sint,
+	TP_PROTO(int vcpu_id, u32 sint),
+	TP_ARGS(vcpu_id, sint),
+
+	TP_STRUCT__entry(
+		__field(int, vcpu_id)
+		__field(u32, sint)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->sint = sint;
+	),
+
+	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
+);
+
+/*
+ * Tracepoint for synic_set_irq.
+ */
+TRACE_EVENT(kvm_hv_synic_set_irq,
+	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
+	TP_ARGS(vcpu_id, sint, vector, ret),
+
+	TP_STRUCT__entry(
+		__field(int, vcpu_id)
+		__field(u32, sint)
+		__field(int, vector)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->sint = sint;
+		__entry->vector = vector;
+		__entry->ret = ret;
+	),
+
+	TP_printk("vcpu_id %d sint %u vector %d ret %d",
+		  __entry->vcpu_id, __entry->sint, __entry->vector,
+		  __entry->ret)
+);
+
+/*
+ * Tracepoint for kvm_hv_synic_send_eoi.
+ */
+TRACE_EVENT(kvm_hv_synic_send_eoi,
+	TP_PROTO(int vcpu_id, int vector),
+	TP_ARGS(vcpu_id, vector),
+
+	TP_STRUCT__entry(
+		__field(int, vcpu_id)
+		__field(u32, sint)
+		__field(int, vector)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->vector = vector;
+	),
+
+	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
+);
+
+/*
+ * Tracepoint for synic_set_msr.
+ */
+TRACE_EVENT(kvm_hv_synic_set_msr,
+	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
+	TP_ARGS(vcpu_id, msr, data, host),
+
+	TP_STRUCT__entry(
+		__field(int, vcpu_id)
+		__field(u32, msr)
+		__field(u64, data)
+		__field(bool, host)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->msr = msr;
+		__entry->data = data;
+		__entry->host = host
+	),
+
+	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
+		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
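With this patch applied, the four new events are registered under the existing "kvm" trace system and replace the ad-hoc vcpu_debug() output, so they can be consumed through the regular ftrace interface. The following is a minimal userspace sketch, not part of the patch, that enables the kvm_hv_synic_set_msr event via tracefs and streams the formatted records. It assumes tracefs is mounted at /sys/kernel/tracing (on older setups the same files live under /sys/kernel/debug/tracing) and that it runs as root; the same records should also be reachable through trace-cmd or perf, e.g. "kvm:kvm_hv_synic_set_msr".

/*
 * Hypothetical helper, not from the patch: enable one of the new
 * SynIC tracepoints through tracefs and dump trace_pipe to stdout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static const char *tracefs = "/sys/kernel/tracing";

static void write_file(const char *rel, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), "%s/%s", tracefs, rel);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		exit(1);
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
}

int main(void)
{
	char pipe_path[256];
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on only the SynIC MSR tracepoint added by this patch. */
	write_file("events/kvm/kvm_hv_synic_set_msr/enable", "1");
	write_file("tracing_on", "1");

	/* Stream formatted trace records until interrupted. */
	snprintf(pipe_path, sizeof(pipe_path), "%s/trace_pipe", tracefs);
	fd = open(pipe_path, O_RDONLY);
	if (fd < 0) {
		perror(pipe_path);
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}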