Diffstat (limited to 'include/kvm/arm_pmu.h'):

 include/kvm/arm_pmu.h | 47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index c4aabbf002f7..e08aeec5d936 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -10,24 +10,23 @@
#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
-#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#define KVM_ARMV8_PMU_MAX_COUNTERS 32
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
-
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
};
struct kvm_pmu_events {
- u32 events_host;
- u32 events_guest;
+ u64 events_host;
+ u64 events_guest;
};
struct kvm_pmu {
struct irq_work overflow_work;
struct kvm_pmu_events events;
- struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
int irq_num;
bool created;
bool irq_level;
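
A side note on the hunk above: the counter array is now sized by a KVM-owned constant (32 slots), and the host/guest event bitmaps widen from u32 to u64, so one bit per counter index fits with room to spare. Below is a standalone sketch of that layout, assuming the usual PMUv3 convention of event counters at indices 0..30 and the cycle counter at index 31; the SKETCH_* names are illustrative, not from the patch.

	#include <stdbool.h>
	#include <stdint.h>

	#define SKETCH_MAX_COUNTERS	32	/* mirrors KVM_ARMV8_PMU_MAX_COUNTERS */
	#define SKETCH_CYCLE_IDX	31	/* conventional cycle-counter slot */

	/* One bit per counter: a u64 bitmap covers all 32 possible indices. */
	static inline bool sketch_counter_marked(uint64_t bitmap, uint8_t idx)
	{
		return idx < SKETCH_MAX_COUNTERS && (bitmap & (UINT64_C(1) << idx));
	}
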
@@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
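
The set/get attr hooks declared here back the vcpu device-attribute ioctls that userspace uses to configure the emulated PMU. A rough userspace sketch (arm64 only), using the documented KVM_ARM_VCPU_PMU_V3_CTRL group; error handling is elided and sketch_set_pmu_irq is a made-up helper name:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int sketch_set_pmu_irq(int vcpu_fd, int irq)
	{
		struct kvm_device_attr attr = {
			.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
			.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
			.addr	= (uint64_t)(unsigned long)&irq,
		};

		/* Routes the PMU overflow interrupt to the given PPI number;
		 * userspace typically follows this with KVM_ARM_VCPU_PMU_V3_INIT. */
		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
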
@@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);
#define kvm_vcpu_has_pmu(vcpu) \
- (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
/*
* Updates the vcpu's view of the pmu events for this cpu.
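
For context on the kvm_vcpu_has_pmu() change above: the macro now goes through vcpu_has_feature() rather than calling test_bit() on the per-vCPU feature bitmap directly. Roughly, that helper is still a test_bit(), just against the VM-wide feature set; the sketch below is an approximation, not the kernel's exact definition (which lives in arch/arm64/include/asm/kvm_host.h).

	/* Approximation only: models vcpu_has_feature() as a test_bit()
	 * on a VM-wide feature bitmap shared by all vCPUs. */
	static inline bool vcpu_has_feature_sketch(const struct kvm_vcpu *vcpu,
						   int feature)
	{
		return test_bit(feature, vcpu->kvm->arch.vcpu_features);
	}
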
@@ -90,18 +90,12 @@ void kvm_vcpu_pmu_resync_el0(void);
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({ \
- u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
- u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
- \
- pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
-})
-
u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
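
The deleted kvm_pmu_is_3p5() macro boiled down to extracting the PMUVer field from the guest's sanitised ID_AA64DFR0_EL1 value and comparing it against the PMUv3p5 encoding. Here is a standalone sketch of that check, assuming the architectural field position (bits [11:8]) and the 0b0110 encoding for PMUv3p5; it also assumes, as the removed macro did, that the IMPLEMENTATION DEFINED encoding (0xF) has been sanitised away before the comparison. The SKETCH_* names stand in for the kernel's SYS_FIELD_GET() machinery.

	#include <stdbool.h>
	#include <stdint.h>

	#define SKETCH_PMUVER_SHIFT	8	/* ID_AA64DFR0_EL1.PMUVer, bits [11:8] */
	#define SKETCH_PMUVER_MASK	0xfULL
	#define SKETCH_PMUVER_V3P5	6	/* 0b0110 per the Arm ARM */

	static inline bool sketch_pmu_is_3p5(uint64_t id_aa64dfr0)
	{
		uint8_t pmuver = (id_aa64dfr0 >> SKETCH_PMUVER_SHIFT) &
				 SKETCH_PMUVER_MASK;

		return pmuver >= SKETCH_PMUVER_V3P5;
	}
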
@@ -164,16 +158,35 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
}
#define kvm_vcpu_has_pmu(vcpu) ({ false; })
-#define kvm_pmu_is_3p5(vcpu) ({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
return 0;
}
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+ return 0;
+}
static inline void kvm_vcpu_pmu_resync_el0(void) {}
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
#endif
#endif