author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-03 22:47:28 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-03 22:47:28 +0300
commit		12b7bcb43e6ea834ab2f5dc52d971e379a0ca109 (patch)
tree		65218ee4792a1bae88feb75e615c5ec4e602c7a2 /arch/x86
parent		00bcf5cdd6c0e2e92ce3dd852ca68a3b779fa4ec (diff)
parent		41aad2a6d4fcdda8d73c9739daf7a9f3f49499d6 (diff)
download	linux-12b7bcb43e6ea834ab2f5dc52d971e379a0ca109.tar.xz
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"The main kernel side changes were:
- uprobes enhancements (Masami Hiramatsu)
- Uncore group events enhancements (David Carrillo-Cisneros)
- x86 Intel: Add support for Skylake server uncore PMUs (Kan Liang)
- x86 Intel: LBR cleanups and enhancements, for better branch
annotation tracking (Peter Zijlstra)
- x86 Intel: Add support for PTWRITE and power event tracing
(Alexander Shishkin)
- ... various fixes, cleanups and smaller enhancements.
Lots of tooling changes - a couple of highlights:
- Support event group view with hierarchy mode in 'perf top' and
'perf report' (Namhyung Kim)
e.g.:
$ perf record -e '{cycles,instructions}' make
$ perf report --hierarchy --stdio
...
   #            Overhead  Command / Shared Object / Symbol
   # ....................  ..................................
   ...
       25.74%  27.18%  sh
          19.96%  24.14%  libc-2.24.so
             9.55%  14.64%  [.] __strcmp_sse2
             1.54%   0.00%  [.] __tfind
             1.07%   1.13%  [.] _int_malloc
             0.95%   0.00%  [.] __strchr_sse2
             0.89%   1.39%  [.] __tsearch
             0.76%   0.00%  [.] strlen
- Add branch stack / basic block info to 'perf annotate --stdio',
where for each branch, we add an asm comment after the instruction
with information on how often it was taken and predicted. See
example with color output at:
http://vger.kernel.org/~acme/perf/annotate_basic_blocks.png
(Peter Zijlstra)
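      e.g., a minimal sketch of the workflow (the workload and function
      name are placeholders; -b records branch stack samples):
        $ perf record -b ./my_workload
        $ perf annotate --stdio my_function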
 - Add support for using symbols in address filters with Intel PT and
 ARM CoreSight (hardware-assisted tracing facilities) (Adrian
 Hunter, Mathieu Poirier)
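      e.g., a sketch that limits an Intel PT trace to a single function
      (the symbol and binary are placeholders; this assumes a CPU with
      PT address filtering support):
        $ perf record -e intel_pt// --filter 'filter main @ /usr/bin/ls' -- ls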
 - Add support for interacting with CoreSight PMU ETMs/PTMs, which are
 IP blocks that perform hardware-assisted tracing on an ARM CPU core
 (Mathieu Poirier)
 - Support generating cross-arch probes, i.e. if you specify a vmlinux
 file for a different arch than the host machine's,
 $ perf probe --definition function_name args
 will generate the probe definition string needed to append to the
 target machine's /sys/kernel/debug/tracing/kprobe_events file, using
 scripting (Masami Hiramatsu).
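      e.g., a sketch of that workflow (the target arch, vmlinux path and
      probe point are hypothetical):
        host$ perf probe --vmlinux=/path/to/arm64/vmlinux --definition 'do_sys_open filename:string'
        target$ echo '<emitted definition string>' >> /sys/kernel/debug/tracing/kprobe_events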
- Allow configuring the default 'perf report -s' sort order in
~/.perfconfig, for instance, "sym,dso" may be more fitting for
kernel developers. (Arnaldo Carvalho de Melo)
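      e.g., a minimal sketch, assuming the config key is report.sort_order:
        $ cat ~/.perfconfig
        [report]
            sort_order = sym,dso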
- ... plus lots of other changes, refactorings, features and fixes"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (149 commits)
perf tests: Add dwarf unwind test for powerpc
perf probe: Match linkage name with mangled name
perf probe: Fix to cut off incompatible chars from group name
perf probe: Skip if the function address is 0
perf probe: Ignore the error of finding inline instance
perf intel-pt: Fix decoding when there are address filters
perf intel-pt: Enable decoder to handle TIP.PGD with missing IP
perf intel-pt: Read address filter from AUXTRACE_INFO event
perf intel-pt: Record address filter in AUXTRACE_INFO event
perf intel-pt: Add a helper function for processing AUXTRACE_INFO
perf intel-pt: Fix missing error codes processing auxtrace_info
perf intel-pt: Add support for recording the max non-turbo ratio
perf intel-pt: Fix snapshot overlap detection decoder errors
perf probe: Increase debug level of SDT debug messages
perf record: Add support for using symbols in address filters
perf symbols: Add dso__last_symbol()
perf record: Fix error paths
perf record: Rename label 'out_symbol_exit'
perf script: Fix vanished idle symbols
perf evsel: Add support for address filters
...
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/events/core.c			|  24
-rw-r--r--	arch/x86/events/intel/core.c		|  31
-rw-r--r--	arch/x86/events/intel/ds.c		| 108
-rw-r--r--	arch/x86/events/intel/lbr.c		|  70
-rw-r--r--	arch/x86/events/intel/pt.c		|  24
-rw-r--r--	arch/x86/events/intel/pt.h		|   5
-rw-r--r--	arch/x86/events/intel/rapl.c		|   4
-rw-r--r--	arch/x86/events/intel/uncore.c		|  11
-rw-r--r--	arch/x86/events/intel/uncore.h		|   7
-rw-r--r--	arch/x86/events/intel/uncore_snb.c	|   2
-rw-r--r--	arch/x86/events/intel/uncore_snbep.c	| 626
-rw-r--r--	arch/x86/events/perf_event.h		|  13
12 files changed, 820 insertions, 105 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d0efb5cb1b00..18a1acf86c90 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1201,6 +1201,9 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be performed
 	 * at commit time (->commit_txn) as a whole.
+	 *
+	 * If commit fails, we'll call ->del() on all events
+	 * for which ->add() was called.
 	 */
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
 		goto done_collect;
@@ -1223,6 +1226,14 @@ done_collect:
 	cpuc->n_added += n - n0;
 	cpuc->n_txn += n - n0;
 
+	if (x86_pmu.add) {
+		/*
+		 * This is before x86_pmu_enable() will call x86_pmu_start(),
+		 * so we enable LBRs before an event needs them etc..
+		 */
+		x86_pmu.add(event);
+	}
+
 	ret = 0;
 out:
 	return ret;
@@ -1346,7 +1357,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
 
 	/*
-	 * If we're called during a txn, we don't need to do anything.
+	 * If we're called during a txn, we only need to undo x86_pmu.add.
 	 * The events never got scheduled and ->cancel_txn will truncate
 	 * the event_list.
 	 *
@@ -1354,7 +1365,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	 * an event added during that same TXN.
 	 */
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
-		return;
+		goto do_del;
 
 	/*
 	 * Not a TXN, therefore cleanup properly.
@@ -1384,6 +1395,15 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	--cpuc->n_events;
 
 	perf_event_update_userpage(event);
+
+do_del:
+	if (x86_pmu.del) {
+		/*
+		 * This is after x86_pmu_stop(); so we disable LBRs after any
+		 * event can need them etc..
+		 */
+		x86_pmu.del(event);
+	}
 }
 
 int x86_pmu_handle_irq(struct pt_regs *regs)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 4c9a79b9cd69..a3a9eb84b5cf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1906,13 +1906,6 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
-	/*
-	 * must disable before any actual event
-	 * because any event may be combined with LBR
-	 */
-	if (needs_branch_stack(event))
-		intel_pmu_lbr_disable(event);
-
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
@@ -1924,6 +1917,14 @@ static void intel_pmu_disable_event(struct perf_event *event)
 		intel_pmu_pebs_disable(event);
 }
 
+static void intel_pmu_del_event(struct perf_event *event)
+{
+	if (needs_branch_stack(event))
+		intel_pmu_lbr_del(event);
+	if (event->attr.precise_ip)
+		intel_pmu_pebs_del(event);
+}
+
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
 	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
@@ -1967,12 +1968,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		intel_pmu_enable_bts(hwc->config);
 		return;
 	}
-	/*
-	 * must enabled before any actual event
-	 * because any event may be combined with LBR
-	 */
-	if (needs_branch_stack(event))
-		intel_pmu_lbr_enable(event);
 
 	if (event->attr.exclude_host)
 		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
@@ -1993,6 +1988,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
+static void intel_pmu_add_event(struct perf_event *event)
+{
+	if (event->attr.precise_ip)
+		intel_pmu_pebs_add(event);
+	if (needs_branch_stack(event))
+		intel_pmu_lbr_add(event);
+}
+
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
@@ -3291,6 +3294,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.enable_all = intel_pmu_enable_all,
 	.enable = intel_pmu_enable_event,
 	.disable = intel_pmu_disable_event,
+	.add = intel_pmu_add_event,
+	.del = intel_pmu_del_event,
 	.hw_config = intel_pmu_hw_config,
 	.schedule_events = x86_schedule_events,
 	.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 9b983a474253..0319311dbdbb 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -806,9 +806,65 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 	return &emptyconstraint;
 }
 
-static inline bool pebs_is_enabled(struct cpu_hw_events *cpuc)
+/*
+ * We need the sched_task callback even for per-cpu events when we use
+ * the large interrupt threshold, such that we can provide PID and TID
+ * to PEBS samples.
+ */
+static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
+{
+	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
+}
+
+static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
+{
+	struct debug_store *ds = cpuc->ds;
+	u64 threshold;
+
+	if (cpuc->n_pebs == cpuc->n_large_pebs) {
+		threshold = ds->pebs_absolute_maximum -
+			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+	} else {
+		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+	}
+
+	ds->pebs_interrupt_threshold = threshold;
+}
+
+static void
+pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
+{
+	/*
+	 * Make sure we get updated with the first PEBS
+	 * event. It will trigger also during removal, but
+	 * that does not hurt:
+	 */
+	bool update = cpuc->n_pebs == 1;
+
+	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
+		if (!needed_cb)
+			perf_sched_cb_inc(pmu);
+		else
+			perf_sched_cb_dec(pmu);
+
+		update = true;
+	}
+
+	if (update)
+		pebs_update_threshold(cpuc);
+}
+
+void intel_pmu_pebs_add(struct perf_event *event)
 {
-	return (cpuc->pebs_enabled & ((1ULL << MAX_PEBS_EVENTS) - 1));
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	bool needed_cb = pebs_needs_sched_cb(cpuc);
+
+	cpuc->n_pebs++;
+	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
+		cpuc->n_large_pebs++;
+
+	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
 }
 
 void intel_pmu_pebs_enable(struct perf_event *event)
@@ -816,12 +872,9 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	struct debug_store *ds = cpuc->ds;
-	bool first_pebs;
-	u64 threshold;
 
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
-	first_pebs = !pebs_is_enabled(cpuc);
 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
 
 	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
@@ -830,46 +883,34 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 		cpuc->pebs_enabled |= 1ULL << 63;
 
 	/*
-	 * When the event is constrained enough we can use a larger
-	 * threshold and run the event with less frequent PMI.
+	 * Use auto-reload if possible to save a MSR write in the PMI.
+	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
 	 */
-	if (hwc->flags & PERF_X86_EVENT_FREERUNNING) {
-		threshold = ds->pebs_absolute_maximum -
-			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
-
-		if (first_pebs)
-			perf_sched_cb_inc(event->ctx->pmu);
-	} else {
-		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
-
-		/*
-		 * If not all events can use larger buffer,
-		 * roll back to threshold = 1
-		 */
-		if (!first_pebs &&
-		    (ds->pebs_interrupt_threshold > threshold))
-			perf_sched_cb_dec(event->ctx->pmu);
-	}
-
-	/* Use auto-reload if possible to save a MSR write in the PMI */
 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
 		ds->pebs_event_reset[hwc->idx] =
 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
 	}
+}
+
+void intel_pmu_pebs_del(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	bool needed_cb = pebs_needs_sched_cb(cpuc);
 
-	if (first_pebs || ds->pebs_interrupt_threshold > threshold)
-		ds->pebs_interrupt_threshold = threshold;
+	cpuc->n_pebs--;
+	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
+		cpuc->n_large_pebs--;
+
+	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
 }
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	struct debug_store *ds = cpuc->ds;
-	bool large_pebs = ds->pebs_interrupt_threshold >
-		ds->pebs_buffer_base + x86_pmu.pebs_record_size;
 
-	if (large_pebs)
+	if (cpuc->n_pebs == cpuc->n_large_pebs)
 		intel_pmu_drain_pebs_buffer();
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -879,9 +920,6 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 		cpuc->pebs_enabled &= ~(1ULL << 63);
 
-	if (large_pebs && !pebs_is_enabled(cpuc))
-		perf_sched_cb_dec(event->ctx->pmu);
-
 	if (cpuc->enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 707d358e0dff..fc6cf21c535e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -380,7 +380,6 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
 
 	/*
@@ -390,31 +389,21 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 	 */
 	task_ctx = ctx ? ctx->task_ctx_data : NULL;
 	if (task_ctx) {
-		if (sched_in) {
+		if (sched_in)
 			__intel_pmu_lbr_restore(task_ctx);
-			cpuc->lbr_context = ctx;
-		} else {
+		else
 			__intel_pmu_lbr_save(task_ctx);
-		}
 		return;
 	}
 
 	/*
-	 * When sampling the branck stack in system-wide, it may be
-	 * necessary to flush the stack on context switch. This happens
-	 * when the branch stack does not tag its entries with the pid
-	 * of the current task. Otherwise it becomes impossible to
-	 * associate a branch entry with a task. This ambiguity is more
-	 * likely to appear when the branch stack supports priv level
-	 * filtering and the user sets it to monitor only at the user
-	 * level (which could be a useful measurement in system-wide
-	 * mode). In that case, the risk is high of having a branch
-	 * stack with branch from multiple tasks.
-	 */
-	if (sched_in) {
+	 * Since a context switch can flip the address space and LBR entries
+	 * are not tagged with an identifier, we need to wipe the LBR, even for
+	 * per-cpu events. You simply cannot resolve the branches from the old
+	 * address space.
+	 */
+	if (sched_in)
 		intel_pmu_lbr_reset();
-		cpuc->lbr_context = ctx;
-	}
 }
 
 static inline bool branch_user_callstack(unsigned br_sel)
 {
 	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
 }
 
-void intel_pmu_lbr_enable(struct perf_event *event)
+void intel_pmu_lbr_add(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
@@ -430,27 +419,38 @@
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	/*
-	 * Reset the LBR stack if we changed task context to
-	 * avoid data leaks.
-	 */
-	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
-		intel_pmu_lbr_reset();
-		cpuc->lbr_context = event->ctx;
-	}
 	cpuc->br_sel = event->hw.branch_reg.reg;
 
-	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
-					event->ctx->task_ctx_data) {
+	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
 		task_ctx = event->ctx->task_ctx_data;
 		task_ctx->lbr_callstack_users++;
 	}
 
-	cpuc->lbr_users++;
+	/*
+	 * Request pmu::sched_task() callback, which will fire inside the
+	 * regular perf event scheduling, so that call will:
+	 *
+	 *  - restore or wipe; when LBR-callstack,
+	 *  - wipe; otherwise,
+	 *
+	 * when this is from __perf_event_task_sched_in().
+	 *
+	 * However, if this is from perf_install_in_context(), no such callback
+	 * will follow and we'll need to reset the LBR here if this is the
+	 * first LBR event.
+	 *
+	 * The problem is, we cannot tell these cases apart... but we can
+	 * exclude the biggest chunk of cases by looking at
+	 * event->total_time_running. An event that has accrued runtime cannot
+	 * be 'new'. Conversely, a new event can get installed through the
+	 * context switch path for the first time.
+	 */
 	perf_sched_cb_inc(event->ctx->pmu);
+	if (!cpuc->lbr_users++ && !event->total_time_running)
+		intel_pmu_lbr_reset();
 }
 
-void intel_pmu_lbr_disable(struct perf_event *event)
+void intel_pmu_lbr_del(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
@@ -467,12 +467,6 @@
 	cpuc->lbr_users--;
 	WARN_ON_ONCE(cpuc->lbr_users < 0);
 	perf_sched_cb_dec(event->ctx->pmu);
-
-	if (cpuc->enabled && !cpuc->lbr_users) {
-		__intel_pmu_lbr_disable();
-		/* avoid stale pointer */
-		cpuc->lbr_context = NULL;
-	}
 }
 
 void intel_pmu_lbr_enable_all(bool pmi)
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 861a7d9cb60f..c5047b8f777b 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -69,6 +69,8 @@ static struct pt_cap_desc {
 	PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)),
 	PT_CAP(ip_filtering, 0, CR_EBX, BIT(2)),
 	PT_CAP(mtc, 0, CR_EBX, BIT(3)),
+	PT_CAP(ptwrite, 0, CR_EBX, BIT(4)),
+	PT_CAP(power_event_trace, 0, CR_EBX, BIT(5)),
 	PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
 	PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
 	PT_CAP(single_range_output, 0, CR_ECX, BIT(2)),
@@ -259,10 +261,16 @@ fail:
 #define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN | \
 			 RTIT_CTL_MTC_RANGE)
 
+#define RTIT_CTL_PTW	(RTIT_CTL_PTW_EN | \
+			 RTIT_CTL_FUP_ON_PTW)
+
 #define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
 			RTIT_CTL_DISRETC	| \
 			RTIT_CTL_CYC_PSB	| \
-			RTIT_CTL_MTC)
+			RTIT_CTL_MTC		| \
+			RTIT_CTL_PWR_EVT_EN	| \
+			RTIT_CTL_FUP_ON_PTW	| \
+			RTIT_CTL_PTW_EN)
 
 static bool pt_event_valid(struct perf_event *event)
 {
@@ -311,6 +319,20 @@ static bool pt_event_valid(struct perf_event *event)
 			return false;
 	}
 
+	if (config & RTIT_CTL_PWR_EVT_EN &&
+	    !pt_cap_get(PT_CAP_power_event_trace))
+		return false;
+
+	if (config & RTIT_CTL_PTW) {
+		if (!pt_cap_get(PT_CAP_ptwrite))
+			return false;
+
+		/* FUPonPTW without PTW doesn't make sense */
+		if ((config & RTIT_CTL_FUP_ON_PTW) &&
+		    !(config & RTIT_CTL_PTW_EN))
+			return false;
+	}
+
 	return true;
 }
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index efffa4a09f68..53473c21b554 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -26,11 +26,14 @@
 #define RTIT_CTL_CYCLEACC	BIT(1)
 #define RTIT_CTL_OS		BIT(2)
 #define RTIT_CTL_USR		BIT(3)
+#define RTIT_CTL_PWR_EVT_EN	BIT(4)
+#define RTIT_CTL_FUP_ON_PTW	BIT(5)
 #define RTIT_CTL_CR3EN		BIT(7)
 #define RTIT_CTL_TOPA		BIT(8)
 #define RTIT_CTL_MTC_EN		BIT(9)
 #define RTIT_CTL_TSC_EN		BIT(10)
 #define RTIT_CTL_DISRETC	BIT(11)
+#define RTIT_CTL_PTW_EN		BIT(12)
 #define RTIT_CTL_BRANCH_EN	BIT(13)
 #define RTIT_CTL_MTC_RANGE_OFFSET	14
 #define RTIT_CTL_MTC_RANGE	(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
@@ -91,6 +94,8 @@ enum pt_capabilities {
 	PT_CAP_psb_cyc,
 	PT_CAP_ip_filtering,
 	PT_CAP_mtc,
+	PT_CAP_ptwrite,
+	PT_CAP_power_event_trace,
 	PT_CAP_topa_output,
 	PT_CAP_topa_multiple_entries,
 	PT_CAP_single_range_output,
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 28865938aadf..b0f0e835a770 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -357,6 +357,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 	if (event->cpu < 0)
 		return -EINVAL;
 
+	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
+
 	/*
 	 * check event is known (determines counter)
 	 */
@@ -765,6 +767,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	 hsx_rapl_init),
+
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
 	{},
 };
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 463dc7a5a6c3..d9844cc74486 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -664,6 +664,8 @@ static int uncore_pmu_event_init(struct perf_event *event)
 	event->cpu = box->cpu;
 	event->pmu_private = box;
 
+	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
+
 	event->hw.idx = -1;
 	event->hw.last_tag = ~0ULL;
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
@@ -683,7 +685,8 @@ static int uncore_pmu_event_init(struct perf_event *event)
 		/* fixed counters have event field hardcoded to zero */
 		hwc->config = 0ULL;
 	} else {
-		hwc->config = event->attr.config & pmu->type->event_mask;
+		hwc->config = event->attr.config &
+			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
 		if (pmu->type->ops->hw_config) {
 			ret = pmu->type->ops->hw_config(box, event);
 			if (ret)
@@ -1321,6 +1324,11 @@ static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
 	.pci_init = skl_uncore_pci_init,
 };
 
+static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
+	.cpu_init = skx_uncore_cpu_init,
+	.pci_init = skx_uncore_pci_init,
+};
+
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
@@ -1343,6 +1351,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
 	{},
 };
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 78b9c23e2d8d..ad986c1e29bc 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -44,6 +44,7 @@ struct intel_uncore_type {
 	unsigned perf_ctr;
 	unsigned event_ctl;
 	unsigned event_mask;
+	unsigned event_mask_ext;
 	unsigned fixed_ctr;
 	unsigned fixed_ctl;
 	unsigned box_ctl;
@@ -120,6 +121,7 @@ struct intel_uncore_box {
 };
 
 #define UNCORE_BOX_FLAG_INITIATED	0
+#define UNCORE_BOX_FLAG_CTL_OFFS8	1 /* event config registers are 8-byte apart */
 
 struct uncore_event_desc {
 	struct kobj_attribute attr;
@@ -172,6 +174,9 @@ static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
 
 static inline unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
 {
+	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
+		return idx * 8 + box->pmu->type->event_ctl;
+
 	return idx * 4 + box->pmu->type->event_ctl;
 }
 
@@ -377,6 +382,8 @@ int bdx_uncore_pci_init(void);
 void bdx_uncore_cpu_init(void);
 int knl_uncore_pci_init(void);
 void knl_uncore_cpu_init(void);
+int skx_uncore_pci_init(void);
+void skx_uncore_cpu_init(void);
 
 /* perf_event_intel_uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 9d35ec0cb8fc..5f845eef9a4d 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -388,6 +388,8 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
 	event->cpu = box->cpu;
 	event->pmu_private = box;
 
+	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
+
 	event->hw.idx = -1;
 	event->hw.last_tag = ~0ULL;
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 8aee83bcf71f..272427700d48 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1,6 +1,10 @@
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
 
+/* SNB-EP pci bus to socket mapping */
+#define SNBEP_CPUNODEID			0x40
+#define SNBEP_GIDNIDMAP			0x54
+
 /* SNB-EP Box level control */
 #define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
 #define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
@@ -264,15 +268,72 @@
 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 
+/* SKX pci bus to socket mapping */
+#define SKX_CPUNODEID			0xc0
+#define SKX_GIDNIDMAP			0xd4
+
+/* SKX CHA */
+#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
+#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
+
+/* SKX IIO */
+#define SKX_IIO0_MSR_PMON_CTL0		0xa48
+#define SKX_IIO0_MSR_PMON_CTR0		0xa41
+#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
+#define SKX_IIO_MSR_OFFSET		0x20
+
+#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
+#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
+#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
+#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
+#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
+					 SNBEP_PMON_CTL_UMASK_MASK | \
+					 SNBEP_PMON_CTL_EDGE_DET | \
+					 SNBEP_PMON_CTL_INVERT | \
+					 SKX_PMON_CTL_TRESH_MASK)
+#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
+					 SKX_PMON_CTL_CH_MASK | \
+					 SKX_PMON_CTL_FC_MASK)
+
+/* SKX IRP */
+#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
+#define SKX_IRP0_MSR_PMON_CTR0		0xa59
+#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
+#define SKX_IRP_MSR_OFFSET		0x20
+
+/* SKX UPI */
+#define SKX_UPI_PCI_PMON_CTL0		0x350
+#define SKX_UPI_PCI_PMON_CTR0		0x318
+#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
+#define SKX_PMON_CTL_UMASK_EXT		0xff
+
+/* SKX M2M */
+#define SKX_M2M_PCI_PMON_CTL0		0x228
+#define SKX_M2M_PCI_PMON_CTR0		0x200
+#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
@@ -280,6 +341,8 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
+DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
+DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
@@ -288,18 +351,26 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
+DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
+DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
+DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
+DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
@@ -1153,7 +1224,7 @@ static struct pci_driver snbep_uncore_pci_driver = {
 /*
  * build pci bus to socket mapping
 */
-static int snbep_pci2phy_map_init(int devid)
+static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
 {
 	struct pci_dev *ubox_dev = NULL;
 	int i, bus, nodeid, segment;
@@ -1168,12 +1239,12 @@ static int snbep_pci2phy_map_init(int devid)
 			break;
 		bus = ubox_dev->bus->number;
 		/* get the Node ID of the local register */
-		err = pci_read_config_dword(ubox_dev, 0x40, &config);
+		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
 		if (err)
 			break;
 		nodeid = config;
 		/* get the Node ID mapping */
-		err = pci_read_config_dword(ubox_dev, 0x54, &config);
+		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
 		if (err)
 			break;
@@ -1207,11 +1278,20 @@ static int snbep_pci2phy_map_init(int devid)
 	raw_spin_lock(&pci2phy_map_lock);
 	list_for_each_entry(map, &pci2phy_map_head, list) {
 		i = -1;
-		for (bus = 255; bus >= 0; bus--) {
-			if (map->pbus_to_physid[bus] >= 0)
-				i = map->pbus_to_physid[bus];
-			else
-				map->pbus_to_physid[bus] = i;
+		if (reverse) {
+			for (bus = 255; bus >= 0; bus--) {
+				if (map->pbus_to_physid[bus] >= 0)
+					i = map->pbus_to_physid[bus];
+				else
+					map->pbus_to_physid[bus] = i;
+			}
+		} else {
+			for (bus = 0; bus <= 255; bus++) {
+				if (map->pbus_to_physid[bus] >= 0)
+					i = map->pbus_to_physid[bus];
+				else
+					map->pbus_to_physid[bus] = i;
+			}
 		}
 	}
 	raw_spin_unlock(&pci2phy_map_lock);
@@ -1224,7 +1304,7 @@ static int snbep_pci2phy_map_init(int devid)
 
 int snbep_uncore_pci_init(void)
 {
-	int ret = snbep_pci2phy_map_init(0x3ce0);
+	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
 	if (ret)
 		return ret;
 	uncore_pci_uncores = snbep_pci_uncores;
@@ -1788,7 +1868,7 @@ static struct pci_driver ivbep_uncore_pci_driver = {
 
 int ivbep_uncore_pci_init(void)
 {
-	int ret = snbep_pci2phy_map_init(0x0e1e);
+	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
 	if (ret)
 		return ret;
 	uncore_pci_uncores = ivbep_pci_uncores;
@@ -2897,7 +2977,7 @@ static struct pci_driver hswep_uncore_pci_driver = {
 
 int hswep_uncore_pci_init(void)
 {
-	int ret = snbep_pci2phy_map_init(0x2f1e);
+	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
 	if (ret)
 		return ret;
 	uncore_pci_uncores = hswep_pci_uncores;
@@ -3186,7 +3266,7 @@ static struct pci_driver bdx_uncore_pci_driver = {
 
 int bdx_uncore_pci_init(void)
 {
-	int ret = snbep_pci2phy_map_init(0x6f1e);
+	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
 
 	if (ret)
 		return ret;
@@ -3196,3 +3276,525 @@ int bdx_uncore_pci_init(void)
 }
 
 /* end of BDX uncore support */
+
+/* SKX uncore support */
+
+static struct intel_uncore_type skx_uncore_ubox = {
+	.name = "ubox",
+	.num_counters = 2,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
+	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
+	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
+	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
+	.ops = &ivbep_uncore_msr_ops,
+	.format_group = &ivbep_uncore_ubox_format_group,
+};
+
+static struct attribute *skx_uncore_cha_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_tid_en.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	&format_attr_filter_tid4.attr,
+	&format_attr_filter_link4.attr,
+	&format_attr_filter_state5.attr,
+	&format_attr_filter_rem.attr,
+	&format_attr_filter_loc.attr,
+	&format_attr_filter_nm.attr,
+	&format_attr_filter_all_op.attr,
+	&format_attr_filter_not_nm.attr,
+	&format_attr_filter_opc_0.attr,
+	&format_attr_filter_opc_1.attr,
+	&format_attr_filter_nc.attr,
+	&format_attr_filter_c6.attr,
+	&format_attr_filter_isoc.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_chabox_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_cha_formats_attr,
+};
+
+static struct event_constraint skx_uncore_chabox_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg skx_uncore_cha_extra_regs[] = {
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
+};
+
+static u64 skx_cha_filter_mask(int fields)
+{
+	u64 mask = 0;
+
+	if (fields & 0x1)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
+	if (fields & 0x2)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
+	if (fields & 0x4)
+		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
+	return mask;
+}
+
+static struct event_constraint *
+skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
+}
+
+static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct extra_reg *er;
+	int idx = 0;
+
+	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
+		if (er->event != (event->hw.config & er->config_mask))
+			continue;
+		idx |= er->idx;
+	}
+
+	if (idx) {
+		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
+			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
+		reg1->idx = idx;
+	}
+	return 0;
+}
+
+static struct intel_uncore_ops skx_uncore_chabox_ops = {
+	/* There is no frz_en for chabox ctl */
+	.init_box = ivbep_uncore_msr_init_box,
+	.disable_box = snbep_uncore_msr_disable_box,
+	.enable_box = snbep_uncore_msr_enable_box,
+	.disable_event = snbep_uncore_msr_disable_event,
+	.enable_event = hswep_cbox_enable_event,
+	.read_counter = uncore_msr_read_counter,
+	.hw_config = skx_cha_hw_config,
+	.get_constraint = skx_cha_get_constraint,
+	.put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type skx_uncore_chabox = {
+	.name = "cha",
+	.num_counters = 4,
+	.perf_ctr_bits = 48,
+	.event_ctl = HSWEP_C0_MSR_PMON_CTL0,
+	.perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
+	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
+	.msr_offset = HSWEP_CBO_MSR_OFFSET,
+	.num_shared_regs = 1,
+	.constraints = skx_uncore_chabox_constraints,
+	.ops = &skx_uncore_chabox_ops,
+	.format_group = &skx_uncore_chabox_format_group,
+};
+
+static struct attribute *skx_uncore_iio_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh9.attr,
+	&format_attr_ch_mask.attr,
+	&format_attr_fc_mask.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_iio_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_iio_formats_attr,
+};
+
+static struct event_constraint skx_uncore_iio_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
+	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
+	EVENT_CONSTRAINT_END
+};
+
+static void skx_iio_enable_event(struct intel_uncore_box *box,
+				 struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops skx_uncore_iio_ops = {
+	.init_box = ivbep_uncore_msr_init_box,
+	.disable_box = snbep_uncore_msr_disable_box,
+	.enable_box = snbep_uncore_msr_enable_box,
+	.disable_event = snbep_uncore_msr_disable_event,
+	.enable_event = skx_iio_enable_event,
+	.read_counter = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_iio = {
+	.name = "iio",
+	.num_counters = 4,
+	.num_boxes = 5,
+	.perf_ctr_bits = 48,
+	.event_ctl = SKX_IIO0_MSR_PMON_CTL0,
+	.perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
+	.event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
+	.event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
+	.box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
+	.msr_offset = SKX_IIO_MSR_OFFSET,
+	.constraints = skx_uncore_iio_constraints,
+	.ops = &skx_uncore_iio_ops,
+	.format_group = &skx_uncore_iio_format_group,
+};
+
+static struct attribute *skx_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group skx_uncore_format_group = {
+	.name = "format",
+	.attrs = skx_uncore_formats_attr,
+};
+
+static struct intel_uncore_type skx_uncore_irp = {
+	.name = "irp",
+	.num_counters = 2,
+	.num_boxes = 5,
+	.perf_ctr_bits = 48,
+	.event_ctl = SKX_IRP0_MSR_PMON_CTL0,
+	.perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
+	.msr_offset = SKX_IRP_MSR_OFFSET,
+	.ops = &skx_uncore_iio_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct intel_uncore_ops skx_uncore_pcu_ops = {
+	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+	.hw_config = hswep_pcu_hw_config,
+	.get_constraint = snbep_pcu_get_constraint,
+	.put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type skx_uncore_pcu = {
+	.name = "pcu",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
+	.event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
+	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
+	.num_shared_regs = 1,
+	.ops = &skx_uncore_pcu_ops,
+	.format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *skx_msr_uncores[] = {
+	&skx_uncore_ubox,
+	&skx_uncore_chabox,
+	&skx_uncore_iio,
+	&skx_uncore_irp,
+	&skx_uncore_pcu,
+	NULL,
+};
+
+static int skx_count_chabox(void)
+{
+	struct pci_dev *chabox_dev = NULL;
+	int bus, count = 0;
+
+	while (1) {
+		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
+		if (!chabox_dev)
+			break;
+		if (count == 0)
+			bus = chabox_dev->bus->number;
+		if (bus != chabox_dev->bus->number)
+			break;
+		count++;
+	}
+
+	pci_dev_put(chabox_dev);
+	return count;
+}
+
+void skx_uncore_cpu_init(void)
+{
+	skx_uncore_chabox.num_boxes = skx_count_chabox();
+	uncore_msr_uncores = skx_msr_uncores;
+}
+
+static struct intel_uncore_type skx_uncore_imc = {
+	.name = "imc",
+	.num_counters = 4,
+	.num_boxes = 6,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+	.event_descs = hswep_uncore_imc_events,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct attribute *skx_upi_uncore_formats_attr[] = {
+	&format_attr_event_ext.attr,
+	&format_attr_umask_ext.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group skx_upi_uncore_format_group = {
+	.name = "format",
+	.attrs = skx_upi_uncore_formats_attr,
+};
+
+static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+	struct pci_dev *pdev = box->pci_dev;
+
+	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
+}
+
+static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
+	.init_box = skx_upi_uncore_pci_init_box,
+	.disable_box = snbep_uncore_pci_disable_box,
+	.enable_box = snbep_uncore_pci_enable_box,
+	.disable_event = snbep_uncore_pci_disable_event,
+	.enable_event = snbep_uncore_pci_enable_event,
+	.read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_upi = {
+	.name = "upi",
+	.num_counters = 4,
+	.num_boxes = 3,
+	.perf_ctr_bits = 48,
+	.perf_ctr = SKX_UPI_PCI_PMON_CTR0,
+	.event_ctl = SKX_UPI_PCI_PMON_CTL0,
+	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+	.event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
+	.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
+	.ops = &skx_upi_uncore_pci_ops,
+	.format_group = &skx_upi_uncore_format_group,
+};
+
+static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+	struct pci_dev *pdev = box->pci_dev;
+
+	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
+}
+
+static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
+	.init_box = skx_m2m_uncore_pci_init_box,
+	.disable_box = snbep_uncore_pci_disable_box,
+	.enable_box = snbep_uncore_pci_enable_box,
+	.disable_event = snbep_uncore_pci_disable_event,
+	.enable_event = snbep_uncore_pci_enable_event,
+	.read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct intel_uncore_type skx_uncore_m2m = {
+	.name = "m2m",
+	.num_counters = 4,
+	.num_boxes = 2,
+	.perf_ctr_bits = 48,
+	.perf_ctr = SKX_M2M_PCI_PMON_CTR0,
+	.event_ctl = SKX_M2M_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
+	.ops = &skx_m2m_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct event_constraint skx_uncore_m2pcie_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type skx_uncore_m2pcie = {
+	.name = "m2pcie",
+	.num_counters = 4,
+	.num_boxes = 4,
+	.perf_ctr_bits = 48,
+	.constraints = skx_uncore_m2pcie_constraints,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+static struct event_constraint skx_uncore_m3upi_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
+	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type skx_uncore_m3upi = {
+	.name = "m3upi",
+	.num_counters = 3,
+	.num_boxes = 3,
+	.perf_ctr_bits = 48,
+	.constraints = skx_uncore_m3upi_constraints,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &ivbep_uncore_pci_ops,
+	.format_group = &skx_uncore_format_group,
+};
+
+enum {
+	SKX_PCI_UNCORE_IMC,
+	SKX_PCI_UNCORE_M2M,
+	SKX_PCI_UNCORE_UPI,
+	SKX_PCI_UNCORE_M2PCIE,
+	SKX_PCI_UNCORE_M3UPI,
+};
+
+static struct intel_uncore_type *skx_pci_uncores[] = {
+	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
+	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
+	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
+	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
+	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
+	NULL,
+};
+
+static const struct pci_device_id skx_uncore_pci_ids[] = {
+	{ /* MC0 Channel 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
+	},
+	{ /* MC0 Channel 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
+	},
+	{ /* MC0 Channel 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
+	},
+	{ /* MC1 Channel 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
+	},
+	{ /* MC1 Channel 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
+	},
+	{ /* MC1 Channel 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
+	},
+	{ /* M2M0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
+	},
+	{ /* M2M1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
+	},
+	{ /* UPI0 Link 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
+	},
+	{ /* UPI0 Link 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
+	},
+	{ /* UPI1 Link 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
+	},
+	{ /* M2PCIe 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
+	},
+	{ /* M2PCIe 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
+	},
+	{ /* M2PCIe 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
+	},
+	{ /* M2PCIe 3 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
+	},
+	{ /* M3UPI0 Link 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+	},
+	{ /* M3UPI0 Link 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+	},
+	{ /* M3UPI1 Link 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+	},
+	{ /* end: all zeroes */ }
+};
+
+
+static struct pci_driver skx_uncore_pci_driver = {
+	.name = "skx_uncore",
+	.id_table = skx_uncore_pci_ids,
+};
+
+int skx_uncore_pci_init(void)
+{
+	/* need to double check pci address */
+	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
+
+	if (ret)
+		return ret;
+
+	uncore_pci_uncores = skx_pci_uncores;
+	uncore_pci_driver = &skx_uncore_pci_driver;
+	return 0;
+}
+
+/* end of SKX uncore support */
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 8c4a47706296..5874d8de1f8d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -194,12 +194,13 @@ struct cpu_hw_events {
 	 */
 	struct debug_store	*ds;
 	u64			pebs_enabled;
+	int			n_pebs;
+	int			n_large_pebs;
 
 	/*
 	 * Intel LBR bits
 	 */
 	int				lbr_users;
-	void				*lbr_context;
 	struct perf_branch_stack	lbr_stack;
 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
 	struct er_account		*lbr_sel;
@@ -508,6 +509,8 @@ struct x86_pmu {
 	void		(*enable_all)(int added);
 	void		(*enable)(struct perf_event *);
 	void		(*disable)(struct perf_event *);
+	void		(*add)(struct perf_event *);
+	void		(*del)(struct perf_event *);
 	int		(*hw_config)(struct perf_event *event);
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
@@ -888,6 +891,10 @@ extern struct event_constraint intel_skl_pebs_event_constraints[];
 
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
+void intel_pmu_pebs_add(struct perf_event *event);
+
+void intel_pmu_pebs_del(struct perf_event *event);
+
 void intel_pmu_pebs_enable(struct perf_event *event);
 
 void intel_pmu_pebs_disable(struct perf_event *event);
@@ -906,9 +913,9 @@ u64 lbr_from_signext_quirk_wr(u64 val);
 
 void intel_pmu_lbr_reset(void);
 
-void intel_pmu_lbr_enable(struct perf_event *event);
+void intel_pmu_lbr_add(struct perf_event *event);
 
-void intel_pmu_lbr_disable(struct perf_event *event);
+void intel_pmu_lbr_del(struct perf_event *event);
 
 void intel_pmu_lbr_enable_all(bool pmi);