author    Peter Zijlstra <peterz@infradead.org>  2010-04-09 01:03:20 +0400
committer Ingo Molnar <mingo@elte.hu>  2010-05-07 13:31:02 +0400
commit    ab608344bcbde4f55ec4cd911b686b0ce3eae076 (patch)
tree      ebd38efabfaab59d6de11a24143d70e1eec36fae /arch/x86
parent    2b0b5c6fe9b383f3cf35a0a6371c9d577bd523ff (diff)
download  linux-ab608344bcbde4f55ec4cd911b686b0ce3eae076.tar.xz
perf, x86: Improve the PEBS ABI
Rename perf_event_attr::precise to perf_event_attr::precise_ip and widen it to 2 bits. This new field describes the required precision of the PERF_SAMPLE_IP field:

 0 - SAMPLE_IP can have arbitrary skid
 1 - SAMPLE_IP must have constant skid
 2 - SAMPLE_IP requested to have 0 skid
 3 - SAMPLE_IP must have 0 skid

And modify the Intel PEBS code accordingly. The PEBS implementation now supports up to precise_ip == 2, where we perform the IP fixup.

Also s/PERF_RECORD_MISC_EXACT/&_IP/ to clarify its meaning: this bit should be set for each PERF_SAMPLE_IP field known to match the actual instruction triggering the event.

This new scheme allows for a PEBS mode that uses the buffer for more than a single event.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
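For illustration only (not part of the patch): a minimal userspace sketch of the new ABI, opening a cycles sampling event with perf_event_open() and stepping precise_ip down on failure, mirroring the capability check added to x86_pmu_hw_config() below. The event choice, sample period, and fallback loop are assumptions made for the example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd = -1, p;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = PERF_TYPE_HARDWARE;
	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type   = PERF_SAMPLE_IP;

	/*
	 * Ask for the strongest precision first; the kernel rejects a
	 * precise_ip level it cannot honour with -EOPNOTSUPP, so step
	 * down until the open succeeds (level 0 places no precision
	 * constraint on the sampled IP).
	 */
	for (p = 2; p >= 0; p--) {
		attr.precise_ip = p;
		fd = perf_event_open(&attr, 0, -1, -1, 0);
		if (fd >= 0)
			break;
	}
	if (fd < 0)
		return 1;

	printf("opened cycles event with precise_ip == %d\n", p);
	close(fd);
	return 0;
}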
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c           17
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c  12
3 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4a3f1f2b9b91..27fa9eeed024 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -488,6 +488,21 @@ static int x86_setup_perfctr(struct perf_event *event)
static int x86_pmu_hw_config(struct perf_event *event)
{
+ if (event->attr.precise_ip) {
+ int precise = 0;
+
+ /* Support for constant skid */
+ if (x86_pmu.pebs)
+ precise++;
+
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+
+ if (event->attr.precise_ip > precise)
+ return -EOPNOTSUPP;
+ }
+
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
@@ -1780,7 +1795,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
}
if (regs->flags & PERF_EFLAGS_EXACT)
- misc |= PERF_RECORD_MISC_EXACT;
+ misc |= PERF_RECORD_MISC_EXACT_IP;
return misc;
}
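The hunk above renames PERF_RECORD_MISC_EXACT to PERF_RECORD_MISC_EXACT_IP. On the consumer side (again a hedged sketch, not part of the patch), a profiler walking PERF_RECORD_SAMPLE records out of the mmap ring buffer would test this bit; ring-buffer bookkeeping is elided here, and the record layout assumes sample_type == PERF_SAMPLE_IP as in the sketch above.

#include <stdio.h>
#include <linux/perf_event.h>

/*
 * Inspect one record pulled from the mmap buffer. With sample_type ==
 * PERF_SAMPLE_IP the sampled IP immediately follows the header, and
 * PERF_RECORD_MISC_EXACT_IP in header->misc means that IP matches the
 * instruction that actually triggered the event.
 */
static void handle_record(struct perf_event_header *hdr)
{
	__u64 ip;

	if (hdr->type != PERF_RECORD_SAMPLE)
		return;

	ip = *(__u64 *)(hdr + 1);
	printf("%s ip: %#llx\n",
	       (hdr->misc & PERF_RECORD_MISC_EXACT_IP) ? "exact" : "skid ",
	       (unsigned long long)ip);
}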
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a4b56ac425cb..fdbc652d3feb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -563,7 +563,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
x86_pmu_disable_event(event);
- if (unlikely(event->attr.precise))
+ if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_disable(event);
}
@@ -615,7 +615,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}
- if (unlikely(event->attr.precise))
+ if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 35056f715e9e..18018d1311cd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -307,7 +307,7 @@ intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
- if (!event->attr.precise)
+ if (!event->attr.precise_ip)
return NULL;
if (x86_pmu.pebs_constraints) {
@@ -330,7 +330,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
cpuc->pebs_enabled |= 1ULL << hwc->idx;
WARN_ON_ONCE(cpuc->enabled);
- if (x86_pmu.intel_cap.pebs_trap)
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
}
@@ -345,7 +345,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
- if (x86_pmu.intel_cap.pebs_trap)
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_disable(event);
}
@@ -485,7 +485,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
regs.bp = pebs->bp;
regs.sp = pebs->sp;
- if (intel_pmu_pebs_fixup_ip(&regs))
+ if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
else
regs.flags &= ~PERF_EFLAGS_EXACT;
@@ -518,7 +518,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
WARN_ON_ONCE(!event);
- if (!event->attr.precise)
+ if (!event->attr.precise_ip)
return;
n = top - at;
@@ -570,7 +570,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
WARN_ON_ONCE(!event);
- if (!event->attr.precise)
+ if (!event->attr.precise_ip)
continue;
if (__test_and_set_bit(bit, (unsigned long *)&status))