Diffstat (limited to 'arch/x86/events/intel')
-rw-r--r--   arch/x86/events/intel/core.c     |  92
-rw-r--r--   arch/x86/events/intel/cstate.c   |  44
-rw-r--r--   arch/x86/events/intel/ds.c       |  51
-rw-r--r--   arch/x86/events/intel/lbr.c      |   2
-rw-r--r--   arch/x86/events/intel/pt.c       | 336
-rw-r--r--   arch/x86/events/intel/pt.h       |  12
-rw-r--r--   arch/x86/events/intel/rapl.c     |  30
-rw-r--r--   arch/x86/events/intel/uncore.c   |  28
8 files changed, 375 insertions(+), 220 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e4c2cb65ea50..27ee47a7be66 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -18,6 +18,7 @@
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
 #include <asm/intel-family.h>
+#include <asm/intel_pt.h>
 #include <asm/apic.h>
 #include <asm/cpu_device_id.h>
 
@@ -3298,6 +3299,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		}
 	}
 
+	if (event->attr.aux_output) {
+		if (!event->attr.precise_ip)
+			return -EINVAL;
+
+		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
+	}
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
 
@@ -3816,6 +3824,14 @@ static int intel_pmu_check_period(struct perf_event *event, u64 value)
 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
 }
 
+static int intel_pmu_aux_output_match(struct perf_event *event)
+{
+	if (!x86_pmu.intel_cap.pebs_output_pt_available)
+		return 0;
+
+	return is_intel_pt_event(event);
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 
@@ -3940,6 +3956,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.sched_task		= intel_pmu_sched_task,
 	.check_period		= intel_pmu_check_period,
+
+	.aux_output_match	= intel_pmu_aux_output_match,
 };
 
 static __init void intel_clovertown_quirk(void)
@@ -3969,31 +3987,31 @@ static __init void intel_clovertown_quirk(void)
 }
 
 static const struct x86_cpu_desc isolation_ucodes[] = {
-	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE,		 3, 0x0000001f),
-	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT,		 1, 0x0000001e),
-	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E,		 1, 0x00000015),
+	INTEL_CPU_DESC(INTEL_FAM6_HASWELL,		 3, 0x0000001f),
+	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L,		 1, 0x0000001e),
+	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G,		 1, 0x00000015),
 	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,		 2, 0x00000037),
 	INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,		 4, 0x0000000a),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE,	 4, 0x00000023),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E,	 1, 0x00000014),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,	 2, 0x00000010),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,	 3, 0x07000009),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,	 4, 0x0f000009),
-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,	 5, 0x0e000002),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL,		 4, 0x00000023),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G,		 1, 0x00000014),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 2, 0x00000010),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 3, 0x07000009),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 4, 0x0f000009),
+	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 5, 0x0e000002),
 	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,		 2, 0x0b000014),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 3, 0x00000021),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 4, 0x00000000),
-	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE,	 3, 0x0000007c),
-	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP,	 3, 0x0000007c),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,	 9, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,	 9, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,	10, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,	11, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,	12, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,	10, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,	11, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,	12, 0x0000004e),
-	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,	13, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L,		 3, 0x0000007c),
+	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE,		 3, 0x0000007c),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		 9, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,		 9, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,		10, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,		11, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,		12, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		10, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		11, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		12, 0x0000004e),
+	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		13, 0x0000004e),
 	{}
 };
 
@@ -4151,7 +4169,7 @@ static const struct x86_cpu_desc counter_freezing_ucodes[] = {
 	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,	 2, 0x0000000e),
 	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,	 9, 0x0000002e),
 	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,	10, 0x00000008),
-	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X,	 1, 0x00000028),
+	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_D,	 1, 0x00000028),
 	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	 1, 0x00000028),
 	INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	 8, 0x00000006),
 	{}
@@ -4649,7 +4667,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_SILVERMONT:
-	case INTEL_FAM6_ATOM_SILVERMONT_X:
+	case INTEL_FAM6_ATOM_SILVERMONT_D:
 	case INTEL_FAM6_ATOM_SILVERMONT_MID:
 	case INTEL_FAM6_ATOM_AIRMONT:
 	case INTEL_FAM6_ATOM_AIRMONT_MID:
@@ -4671,7 +4689,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_GOLDMONT_X:
+	case INTEL_FAM6_ATOM_GOLDMONT_D:
 		x86_add_quirk(intel_counter_freezing_quirk);
 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
@@ -4727,7 +4745,7 @@ __init int intel_pmu_init(void)
 		name = "goldmont_plus";
 		break;
 
-	case INTEL_FAM6_ATOM_TREMONT_X:
+	case INTEL_FAM6_ATOM_TREMONT_D:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
@@ -4863,10 +4881,10 @@ __init int intel_pmu_init(void)
 
 		break;
 
-	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL:
 	case INTEL_FAM6_HASWELL_X:
-	case INTEL_FAM6_HASWELL_ULT:
-	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_HASWELL_L:
+	case INTEL_FAM6_HASWELL_G:
 		x86_add_quirk(intel_ht_bug);
 		x86_add_quirk(intel_pebs_isolation_quirk);
 		x86_pmu.late_ack = true;
@@ -4896,9 +4914,9 @@ __init int intel_pmu_init(void)
 		name = "haswell";
 		break;
 
-	case INTEL_FAM6_BROADWELL_CORE:
-	case INTEL_FAM6_BROADWELL_XEON_D:
-	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL:
+	case INTEL_FAM6_BROADWELL_D:
+	case INTEL_FAM6_BROADWELL_G:
 	case INTEL_FAM6_BROADWELL_X:
 		x86_add_quirk(intel_pebs_isolation_quirk);
 		x86_pmu.late_ack = true;
@@ -4961,10 +4979,10 @@ __init int intel_pmu_init(void)
 	case INTEL_FAM6_SKYLAKE_X:
 		pmem = true;
 		/* fall through */
-	case INTEL_FAM6_SKYLAKE_MOBILE:
-	case INTEL_FAM6_SKYLAKE_DESKTOP:
-	case INTEL_FAM6_KABYLAKE_MOBILE:
-	case INTEL_FAM6_KABYLAKE_DESKTOP:
+	case INTEL_FAM6_SKYLAKE_L:
+	case INTEL_FAM6_SKYLAKE:
+	case INTEL_FAM6_KABYLAKE_L:
+	case INTEL_FAM6_KABYLAKE:
 		x86_add_quirk(intel_pebs_isolation_quirk);
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -5008,11 +5026,11 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ICELAKE_X:
-	case INTEL_FAM6_ICELAKE_XEON_D:
+	case INTEL_FAM6_ICELAKE_D:
 		pmem = true;
 		/* fall through */
-	case INTEL_FAM6_ICELAKE_MOBILE:
-	case INTEL_FAM6_ICELAKE_DESKTOP:
+	case INTEL_FAM6_ICELAKE_L:
+	case INTEL_FAM6_ICELAKE:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
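For context on the core.c hunks above: the new attr.aux_output bit marks a PEBS-capable sampling event whose records should be redirected into the AUX (Intel PT) buffer, and intel_pmu_aux_output_match() is what requires the paired event to be an Intel PT event. A minimal userspace sketch of that pairing follows; it assumes a kernel with this series applied and UAPI headers that already carry the aux_output bit, and the raw event encoding 0xc5 (branch misses) is an illustrative assumption, not something this diff specifies.

/* Sketch: group a precise (PEBS) event under an Intel PT group leader
 * with attr.aux_output = 1, so PEBS records land in the PT stream.
 * Error handling is minimal on purpose. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr pt = {0}, pebs = {0};
	unsigned int pt_type;
	FILE *f;

	/* The intel_pt PMU type is dynamic; read it from sysfs. */
	f = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &pt_type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	pt.size = sizeof(pt);
	pt.type = pt_type;		/* group leader: the PT event */
	pt.exclude_kernel = 1;

	pebs.size = sizeof(pebs);
	pebs.type = PERF_TYPE_RAW;
	pebs.config = 0xc5;		/* branch misses (illustrative) */
	pebs.sample_period = 100003;
	pebs.precise_ip = 2;		/* aux_output requires precise_ip */
	pebs.aux_output = 1;		/* route PEBS records into PT */
	pebs.exclude_kernel = 1;

	int pt_fd = perf_event_open(&pt, 0, -1, -1, 0);
	int pebs_fd = perf_event_open(&pebs, 0, -1, pt_fd, 0);

	return (pt_fd < 0 || pebs_fd < 0) ? 1 : 0;
}

With the perf tool this corresponds to something like `perf record -e intel_pt// -e branch-misses/aux-output/pp`; exact syntax depends on the perf version.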
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 688592b34564..9f2f39003d96 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -446,7 +446,7 @@ static int cstate_cpu_init(unsigned int cpu)
 	return 0;
 }
 
-const struct attribute_group *core_attr_update[] = {
+static const struct attribute_group *core_attr_update[] = {
 	&group_cstate_core_c1,
 	&group_cstate_core_c3,
 	&group_cstate_core_c6,
@@ -454,7 +454,7 @@ const struct attribute_group *core_attr_update[] = {
 	NULL,
 };
 
-const struct attribute_group *pkg_attr_update[] = {
+static const struct attribute_group *pkg_attr_update[] = {
 	&group_cstate_pkg_c2,
 	&group_cstate_pkg_c3,
 	&group_cstate_pkg_c6,
@@ -593,40 +593,40 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,	   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL,	snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,	snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_G,	snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L,	hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,   slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,      slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,	slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_D,	slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,	slm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL,		snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_D,	snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G,	snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,	snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L,		snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE,		snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X,		snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L,	hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE,		hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L,	cnl_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,   glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,	glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D,	glm_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L,		snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,		snb_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index f1269e804e9b..ce83950036c5 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -902,6 +902,9 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
  */
 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
 {
+	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
+		return false;
+
 	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
 }
 
@@ -919,6 +922,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 	u64 threshold;
 	int reserved;
 
+	if (cpuc->n_pebs_via_pt)
+		return;
+
 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
 		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
 	else
@@ -1059,10 +1065,40 @@ void intel_pmu_pebs_add(struct perf_event *event)
 	cpuc->n_pebs++;
 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
 		cpuc->n_large_pebs++;
+	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+		cpuc->n_pebs_via_pt++;
 
 	pebs_update_state(needed_cb, cpuc, event, true);
 }
 
+static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	if (!is_pebs_pt(event))
+		return;
+
+	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
+		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
+}
+
+static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	struct debug_store *ds = cpuc->ds;
+
+	if (!is_pebs_pt(event))
+		return;
+
+	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
+		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
+
+	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
+
+	wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+}
+
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1100,6 +1136,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	} else {
 		ds->pebs_event_reset[hwc->idx] = 0;
 	}
+
+	intel_pmu_pebs_via_pt_enable(event);
 }
 
 void intel_pmu_pebs_del(struct perf_event *event)
@@ -1111,6 +1149,8 @@ void intel_pmu_pebs_del(struct perf_event *event)
 	cpuc->n_pebs--;
 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
 		cpuc->n_large_pebs--;
+	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+		cpuc->n_pebs_via_pt--;
 
 	pebs_update_state(needed_cb, cpuc, event, false);
 }
@@ -1120,7 +1160,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (cpuc->n_pebs == cpuc->n_large_pebs)
+	if (cpuc->n_pebs == cpuc->n_large_pebs &&
+	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
 		intel_pmu_drain_pebs_buffer();
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -1131,6 +1172,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 		cpuc->pebs_enabled &= ~(1ULL << 63);
 
+	intel_pmu_pebs_via_pt_disable(event);
+
 	if (cpuc->enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
 
@@ -2031,6 +2074,12 @@ void __init intel_ds_init(void)
 					  PERF_SAMPLE_REGS_INTR);
 		}
 		pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+
+		if (x86_pmu.intel_cap.pebs_output_pt_available) {
+			pr_cont("PEBS-via-PT, ");
+			x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+		}
+
 		break;
 
 	default:
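The PEBS_* masks driven by intel_pmu_pebs_via_pt_enable()/..._disable() above live in asm/perf_event.h, which is not part of this diff. The sketch below reconstructs the relevant MSR_IA32_PEBS_ENABLE bit layout from the SDM's description of PEBS output selection; treat the exact positions as an assumption, since the diff only shows the masks being used.

/*
 * Sketch of the MSR_IA32_PEBS_ENABLE control bits used by the new
 * ds.c code (values assumed, defined elsewhere in this series):
 */
#define PEBS_PMI_AFTER_EACH_RECORD	(1ULL << 60)	/* PMI per PEBS record */
#define PEBS_OUTPUT_OFFSET		61
#define PEBS_OUTPUT_MASK		(3ULL << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT			(1ULL << PEBS_OUTPUT_OFFSET)	/* 01b: PT */
#define PEBS_VIA_PT_MASK		(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

This also explains the disable path above: the via-PT bits are only cleared once no other counter bits remain set in pebs_enabled outside PEBS_VIA_PT_MASK.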
@@ -273,7 +273,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
 	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
 }
 
-DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
 
 /* If quirk is enabled, ensure sign extension is 63 bits: */
 inline u64 lbr_from_signext_quirk_wr(u64 val)
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index d3dc2274ddd4..74e80ed9c6c4 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -204,9 +204,9 @@ static int __init pt_pmu_hw_init(void)
 
 	/* model-specific quirks */
 	switch (boot_cpu_data.x86_model) {
-	case INTEL_FAM6_BROADWELL_CORE:
-	case INTEL_FAM6_BROADWELL_XEON_D:
-	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL:
+	case INTEL_FAM6_BROADWELL_D:
+	case INTEL_FAM6_BROADWELL_G:
 	case INTEL_FAM6_BROADWELL_X:
 		/* not setting BRANCH_EN will #GP, erratum BDM106 */
 		pt_pmu.branch_en_always_on = true;
@@ -545,33 +545,62 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
 	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
 }
 
-/*
- * Keep ToPA table-related metadata on the same page as the actual table,
- * taking up a few words from the top
- */
-
-#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
-
 /**
- * struct topa - page-sized ToPA table with metadata at the top
- * @table:	actual ToPA table entries, as understood by PT hardware
+ * struct topa - ToPA metadata
  * @list:	linkage to struct pt_buffer's list of tables
- * @phys:	physical address of this page
  * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
+ * @z_count:	how many times the first entry repeats
 */
 struct topa {
-	struct topa_entry	table[TENTS_PER_PAGE];
 	struct list_head	list;
-	u64			phys;
 	u64			offset;
 	size_t			size;
 	int			last;
+	unsigned int		z_count;
 };
 
+/*
+ * Keep ToPA table-related metadata on the same page as the actual table,
+ * taking up a few words from the top
+ */
+
+#define TENTS_PER_PAGE	\
+	((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
+
+/**
+ * struct topa_page - page-sized ToPA table with metadata at the top
+ * @table:	actual ToPA table entries, as understood by PT hardware
+ * @topa:	metadata
+ */
+struct topa_page {
+	struct topa_entry	table[TENTS_PER_PAGE];
+	struct topa		topa;
+};
+
+static inline struct topa_page *topa_to_page(struct topa *topa)
+{
+	return container_of(topa, struct topa_page, topa);
+}
+
+static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
+{
+	return (struct topa_page *)((unsigned long)te & PAGE_MASK);
+}
+
+static inline phys_addr_t topa_pfn(struct topa *topa)
+{
+	return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
+}
+
 /* make -1 stand for the last table entry */
-#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
+#define TOPA_ENTRY(t, i)				\
+	((i) == -1					\
+		? &topa_to_page(t)->table[(t)->last]	\
+		: &topa_to_page(t)->table[(i)])
+#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
+#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
 
 /**
  * topa_alloc() - allocate page-sized ToPA table
@@ -583,27 +612,26 @@ struct topa {
 static struct topa *topa_alloc(int cpu, gfp_t gfp)
 {
 	int node = cpu_to_node(cpu);
-	struct topa *topa;
+	struct topa_page *tp;
 	struct page *p;
 
 	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 	if (!p)
 		return NULL;
 
-	topa = page_address(p);
-	topa->last = 0;
-	topa->phys = page_to_phys(p);
+	tp = page_address(p);
+	tp->topa.last = 0;
 
 	/*
 	 * In case of singe-entry ToPA, always put the self-referencing END
 	 * link as the 2nd entry in the table
 	 */
 	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
-		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
-		TOPA_ENTRY(topa, 1)->end = 1;
+		TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p);
+		TOPA_ENTRY(&tp->topa, 1)->end = 1;
 	}
 
-	return topa;
+	return &tp->topa;
 }
 
 /**
@@ -643,7 +671,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
 
 	BUG_ON(last->last != TENTS_PER_PAGE - 1);
 
-	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
+	TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
 	TOPA_ENTRY(last, -1)->end = 1;
 }
 
@@ -670,7 +698,7 @@ static bool topa_table_full(struct topa *topa)
 *
 * Return: 0 on success or error code.
 */
-static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
+static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
 {
 	struct topa *topa = buf->last;
 	int order = 0;
@@ -681,13 +709,18 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
 		order = page_private(p);
 
 	if (topa_table_full(topa)) {
-		topa = topa_alloc(buf->cpu, gfp);
+		topa = topa_alloc(cpu, gfp);
 		if (!topa)
 			return -ENOMEM;
 
 		topa_insert_table(buf, topa);
 	}
 
+	if (topa->z_count == topa->last - 1) {
+		if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
+			topa->z_count++;
+	}
+
 	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
 	TOPA_ENTRY(topa, -1)->size = order;
 	if (!buf->snapshot &&
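The restructuring above keeps the hardware-visible ToPA entries at the start of the page and tucks the software-only struct topa into the tail: topa_to_page() recovers the page from the metadata via container_of(), and topa_entry_to_page() recovers it from any entry by masking the pointer down to a page boundary. A freestanding sketch of the same trick follows; all demo_* names are hypothetical, used only to make the pointer arithmetic concrete.

/* Freestanding illustration of the tail-metadata page layout used by
 * struct topa_page above. Compiles and runs on its own. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_entry { uint64_t raw; };		/* hardware-defined slot */
struct demo_meta  { int last; unsigned z_count; };

/* As many entries as fit once the metadata is carved off the page end. */
#define DEMO_ENTRIES \
	((DEMO_PAGE_SIZE - sizeof(struct demo_meta)) / sizeof(struct demo_entry))

struct demo_page {
	struct demo_entry table[DEMO_ENTRIES];	/* what the hardware walks */
	struct demo_meta  meta;			/* software-only, same page */
};

/* metadata -> page: fixed offset, so container_of works */
static struct demo_page *meta_to_page(struct demo_meta *m)
{
	return demo_container_of(m, struct demo_page, meta);
}

/* any entry -> page: entries sit at the page start, so mask the address */
static struct demo_page *entry_to_page(struct demo_entry *e)
{
	return (struct demo_page *)((uintptr_t)e & ~(DEMO_PAGE_SIZE - 1));
}

int main(void)
{
	static struct demo_page pg __attribute__((aligned(DEMO_PAGE_SIZE)));

	/* both recovery paths land on the same page */
	printf("%d %d\n", meta_to_page(&pg.meta) == &pg,
	       entry_to_page(&pg.table[7]) == &pg);
	return 0;
}

The payoff, visible in the hunks above, is that struct topa no longer needs to cache its own physical address: topa_pfn() derives it on demand, and one table entry per page is reclaimed relative to the old "metadata at the top, minus one entry" layout.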
@@ -713,23 +746,26 @@ static void pt_topa_dump(struct pt_buffer *buf)
 	struct topa *topa;
 
 	list_for_each_entry(topa, &buf->tables, list) {
+		struct topa_page *tp = topa_to_page(topa);
 		int i;
 
-		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
-			 topa->phys, topa->offset, topa->size);
+		pr_debug("# table @%p, off %llx size %zx\n", tp->table,
+			 topa->offset, topa->size);
 		for (i = 0; i < TENTS_PER_PAGE; i++) {
 			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
-				 &topa->table[i],
-				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
-				 sizes(topa->table[i].size),
-				 topa->table[i].end ?  'E' : ' ',
-				 topa->table[i].intr ? 'I' : ' ',
-				 topa->table[i].stop ? 'S' : ' ',
-				 *(u64 *)&topa->table[i]);
+				 &tp->table[i],
+				 (unsigned long)tp->table[i].base << TOPA_SHIFT,
+				 sizes(tp->table[i].size),
+				 tp->table[i].end ?  'E' : ' ',
+				 tp->table[i].intr ? 'I' : ' ',
+				 tp->table[i].stop ? 'S' : ' ',
+				 *(u64 *)&tp->table[i]);
 			if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
-			     topa->table[i].stop) ||
-			    topa->table[i].end)
+			     tp->table[i].stop) ||
+			    tp->table[i].end)
 				break;
+			if (!i && topa->z_count)
+				i += topa->z_count;
 		}
 	}
 }
@@ -771,7 +807,7 @@ static void pt_update_head(struct pt *pt)
 
 	/* offset of the current output region within this table */
 	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
-		base += sizes(buf->cur->table[topa_idx].size);
+		base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
 
 	if (buf->snapshot) {
 		local_set(&buf->data_size, base);
@@ -791,7 +827,7 @@ static void pt_update_head(struct pt *pt)
 */
 static void *pt_buffer_region(struct pt_buffer *buf)
 {
-	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
+	return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
 }
 
 /**
@@ -800,7 +836,7 @@ static void *pt_buffer_region(struct pt_buffer *buf)
 */
 static size_t pt_buffer_region_size(struct pt_buffer *buf)
 {
-	return sizes(buf->cur->table[buf->cur_idx].size);
+	return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
 }
 
 /**
@@ -830,7 +866,7 @@ static void pt_handle_status(struct pt *pt)
 		 * know.
 		 */
 		if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
-		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+		    buf->output_off == pt_buffer_region_size(buf)) {
 			perf_aux_output_flag(&pt->handle,
 			                     PERF_AUX_FLAG_TRUNCATED);
 			advance++;
@@ -868,9 +904,11 @@ static void pt_handle_status(struct pt *pt)
 static void pt_read_offset(struct pt_buffer *buf)
 {
 	u64 offset, base_topa;
+	struct topa_page *tp;
 
 	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
-	buf->cur = phys_to_virt(base_topa);
+	tp = phys_to_virt(base_topa);
+	buf->cur = &tp->topa;
 
 	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
 	/* offset within current output region */
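pt_read_offset() above splits IA32_RTIT_OUTPUT_MASK(_PTRS) into an output offset (upper 32 bits) and a ToPA entry index (bits 31:7, hence the 0xffffff80 mask and shift by 7). A small decoding sketch follows; the field layout is per the SDM, and the helper itself is illustrative, not kernel code.

/*
 * IA32_RTIT_OUTPUT_MASK_PTRS (layout per the SDM):
 *
 *   63             32 31              7 6      0
 *  +-----------------+-----------------+--------+
 *  |  output offset  | ToPA entry idx  |  0x7f  |
 *  +-----------------+-----------------+--------+
 */
#include <stdint.h>

struct pt_output_pos {
	uint32_t output_off;	/* byte offset within the current region */
	uint32_t table_idx;	/* index of the entry in the current table */
};

static struct pt_output_pos decode_output_mask(uint64_t msr)
{
	struct pt_output_pos pos = {
		.output_off = (uint32_t)(msr >> 32),
		.table_idx  = (uint32_t)((msr & 0xffffff80u) >> 7),
	};
	return pos;
}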
@@ -879,29 +917,97 @@ static void pt_read_offset(struct pt_buffer *buf)
 	buf->cur_idx = (offset & 0xffffff80) >> 7;
 }
 
-/**
- * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
- * @buf:	PT buffer.
- * @pg:	Page offset in the buffer.
- *
- * When advancing to the next output region (ToPA entry), given a page offset
- * into the buffer, we need to find the offset of the first page in the next
- * region.
- */
-static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
+static struct topa_entry *
+pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
 {
-	struct topa_entry *te = buf->topa_index[pg];
+	struct topa_page *tp;
+	struct topa *topa;
+	unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;
 
-	/* one region */
-	if (buf->first == buf->last && buf->first->last == 1)
-		return pg;
+	/*
+	 * Indicates a bug in the caller.
+	 */
+	if (WARN_ON_ONCE(pg >= buf->nr_pages))
+		return NULL;
+
+	/*
+	 * First, find the ToPA table where @pg fits. With high
+	 * order allocations, there shouldn't be many of these.
+	 */
+	list_for_each_entry(topa, &buf->tables, list) {
+		if (topa->offset + topa->size > pg << PAGE_SHIFT)
+			goto found;
+	}
+
+	/*
+	 * Hitting this means we have a problem in the ToPA
+	 * allocation code.
+	 */
+	WARN_ON_ONCE(1);
 
-	do {
-		pg++;
-		pg &= buf->nr_pages - 1;
-	} while (buf->topa_index[pg] == te);
+	return NULL;
 
-	return pg;
+found:
+	/*
+	 * Indicates a problem in the ToPA allocation code.
+	 */
+	if (WARN_ON_ONCE(topa->last == -1))
+		return NULL;
+
+	tp = topa_to_page(topa);
+	cur_pg = PFN_DOWN(topa->offset);
+	if (topa->z_count) {
+		z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
+		start_idx = topa->z_count + 1;
+	}
+
+	/*
+	 * Multiple entries at the beginning of the table have the same size,
+	 * ideally all of them; if @pg falls there, the search is done.
+	 */
+	if (pg >= cur_pg && pg < cur_pg + z_pg) {
+		idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
+		return &tp->table[idx];
+	}
+
+	/*
+	 * Otherwise, slow path: iterate through the remaining entries.
+	 */
+	for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
+		if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
+			return &tp->table[idx];
+
+		cur_pg += TOPA_ENTRY_PAGES(topa, idx);
+	}
+
+	/*
+	 * Means we couldn't find a ToPA entry in the table that does match.
+	 */
+	WARN_ON_ONCE(1);
+
+	return NULL;
+}
+
+static struct topa_entry *
+pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
+{
+	unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
+	struct topa_page *tp;
+	struct topa *topa;
+
+	tp = (struct topa_page *)table;
+	if (tp->table != te)
+		return --te;
+
+	topa = &tp->topa;
+	if (topa == buf->first)
+		topa = buf->last;
+	else
+		topa = list_prev_entry(topa, list);
+
+	tp = topa_to_page(topa);
+
+	return &tp->table[topa->last - 1];
 }
 
 /**
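The z_count fast path above exploits the fact that topa_insert_pages() tends to fill a table with equally sized regions: if the first z_count + 1 entries share one size, a page offset falling in that prefix maps to its entry with a single division instead of a walk. A tiny worked example, with numbers assumed purely for illustration:

/*
 * Table whose first entries each map 4-page regions:
 *
 *   entry 0: pages  0..3      z_count = 2, so entries 0..2 are equal
 *   entry 1: pages  4..7      sized and cover z_pg = 4 * (2 + 1) = 12
 *   entry 2: pages  8..11     pages in total
 *   entry 3: pages 12..19     (8-page region; breaks the uniform run)
 *
 * For a page inside the uniform prefix, the index is one division:
 */
static unsigned int demo_fast_idx(unsigned int pg, unsigned int entry_pages)
{
	return pg / entry_pages;	/* e.g. pg = 9, entry_pages = 4 -> entry 2 */
}

Pages 12 and up fall past z_pg and take the slow path, which walks entries from start_idx onward, exactly as in the loop above.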
@@ -925,8 +1031,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 	unsigned long idx, npages, wakeup;
 
 	/* can't stop in the middle of an output region */
-	if (buf->output_off + handle->size + 1 <
-	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+	if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
 		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 		return -EINVAL;
 	}
@@ -937,9 +1042,13 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 		return 0;
 
 	/* clear STOP and INT from current entry */
-	buf->topa_index[buf->stop_pos]->stop = 0;
-	buf->topa_index[buf->stop_pos]->intr = 0;
-	buf->topa_index[buf->intr_pos]->intr = 0;
+	if (buf->stop_te) {
+		buf->stop_te->stop = 0;
+		buf->stop_te->intr = 0;
+	}
+
+	if (buf->intr_te)
+		buf->intr_te->intr = 0;
 
 	/* how many pages till the STOP marker */
 	npages = handle->size >> PAGE_SHIFT;
@@ -950,7 +1059,12 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 
 	idx = (head >> PAGE_SHIFT) + npages;
 	idx &= buf->nr_pages - 1;
-	buf->stop_pos = idx;
+
+	if (idx != buf->stop_pos) {
+		buf->stop_pos = idx;
+		buf->stop_te = pt_topa_entry_for_page(buf, idx);
+		buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
+	}
 
 	wakeup = handle->wakeup >> PAGE_SHIFT;
 
@@ -960,51 +1074,20 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 		idx = wakeup;
 
 	idx &= buf->nr_pages - 1;
-	buf->intr_pos = idx;
+	if (idx != buf->intr_pos) {
+		buf->intr_pos = idx;
+		buf->intr_te = pt_topa_entry_for_page(buf, idx);
+		buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
+	}
 
-	buf->topa_index[buf->stop_pos]->stop = 1;
-	buf->topa_index[buf->stop_pos]->intr = 1;
-	buf->topa_index[buf->intr_pos]->intr = 1;
+	buf->stop_te->stop = 1;
+	buf->stop_te->intr = 1;
+	buf->intr_te->intr = 1;
 
 	return 0;
 }
 
 /**
- * pt_buffer_setup_topa_index() - build topa_index[] table of regions
- * @buf:	PT buffer.
- *
- * topa_index[] references output regions indexed by offset into the
- * buffer for purposes of quick reverse lookup.
- */
-static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
-{
-	struct topa *cur = buf->first, *prev = buf->last;
-	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
-		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
-	int pg = 0, idx = 0;
-
-	while (pg < buf->nr_pages) {
-		int tidx;
-
-		/* pages within one topa entry */
-		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
-			buf->topa_index[pg] = te_prev;
-
-		te_prev = te_cur;
-
-		if (idx == cur->last - 1) {
-			/* advance to next topa table */
-			idx = 0;
-			cur = list_entry(cur->list.next, struct topa, list);
-		} else {
-			idx++;
-		}
-		te_cur = TOPA_ENTRY(cur, idx);
-	}
-
-}
-
-/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
@@ -1021,18 +1104,20 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
 */
 static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
 {
+	struct topa_page *cur_tp;
+	struct topa_entry *te;
 	int pg;
 
 	if (buf->snapshot)
 		head &= (buf->nr_pages << PAGE_SHIFT) - 1;
 
 	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
-	pg = pt_topa_next_entry(buf, pg);
+	te = pt_topa_entry_for_page(buf, pg);
 
-	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
-	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
-			(unsigned long)buf->cur) / sizeof(struct topa_entry);
-	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
+	cur_tp = topa_entry_to_page(te);
+	buf->cur = &cur_tp->topa;
+	buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
+	buf->output_off = head & (pt_buffer_region_size(buf) - 1);
 
 	local64_set(&buf->head, head);
 	local_set(&buf->data_size, 0);
@@ -1061,31 +1146,29 @@ static void pt_buffer_fini_topa(struct pt_buffer *buf)
 * @size:	Total size of all regions within this ToPA.
 * @gfp:	Allocation flags.
 */
-static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
-			       gfp_t gfp)
+static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
+			       unsigned long nr_pages, gfp_t gfp)
 {
 	struct topa *topa;
 	int err;
 
-	topa = topa_alloc(buf->cpu, gfp);
+	topa = topa_alloc(cpu, gfp);
 	if (!topa)
 		return -ENOMEM;
 
 	topa_insert_table(buf, topa);
 
 	while (buf->nr_pages < nr_pages) {
-		err = topa_insert_pages(buf, gfp);
+		err = topa_insert_pages(buf, cpu, gfp);
 		if (err) {
 			pt_buffer_fini_topa(buf);
 			return -ENOMEM;
 		}
 	}
 
-	pt_buffer_setup_topa_index(buf);
-
 	/* link last table to the first one, unless we're double buffering */
 	if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
-		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
+		TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
 		TOPA_ENTRY(buf->last, -1)->end = 1;
 	}
 
@@ -1119,18 +1202,18 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages,
 	cpu = raw_smp_processor_id();
 	node = cpu_to_node(cpu);
 
-	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
-			   GFP_KERNEL, node);
+	buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
 	if (!buf)
 		return NULL;
 
-	buf->cpu = cpu;
 	buf->snapshot = snapshot;
 	buf->data_pages = pages;
+	buf->stop_pos = -1;
+	buf->intr_pos = -1;
 
 	INIT_LIST_HEAD(&buf->tables);
 
-	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
+	ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
 	if (ret) {
 		kfree(buf);
 		return NULL;
@@ -1296,7 +1379,7 @@ void intel_pt_interrupt(void)
 		return;
 	}
 
-	pt_config_buffer(buf->cur->table, buf->cur_idx,
+	pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
 			 buf->output_off);
 	pt_config(event);
 }
@@ -1361,7 +1444,7 @@ static void pt_event_start(struct perf_event *event, int mode)
 	WRITE_ONCE(pt->handle_nmi, 1);
 	hwc->state = 0;
 
-	pt_config_buffer(buf->cur->table, buf->cur_idx,
+	pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
 			 buf->output_off);
 	pt_config(event);
 
@@ -1481,6 +1564,11 @@ void cpu_emergency_stop_pt(void)
 		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
 }
 
+int is_intel_pt_event(struct perf_event *event)
+{
+	return event->pmu == &pt_pmu.pmu;
+}
+
 static __init int pt_init(void)
 {
 	int ret, cpu, prior_warn = 0;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 63fe4063fbd6..1d2bb7572374 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -53,7 +53,6 @@ struct pt_pmu {
 /**
 * struct pt_buffer - buffer configuration; one buffer per task_struct or
 *		cpu, depending on perf event configuration
- * @cpu:	cpu for per-cpu allocation
 * @tables:	list of ToPA tables in this buffer
 * @first:	shorthand for first topa table
 * @last:	shorthand for last topa table
@@ -65,13 +64,14 @@ struct pt_pmu {
 * @lost:	if data was lost/truncated
 * @head:	logical write offset inside the buffer
 * @snapshot:	if this is for a snapshot/overwrite counter
- * @stop_pos:	STOP topa entry in the buffer
- * @intr_pos:	INT topa entry in the buffer
+ * @stop_pos:	STOP topa entry index
+ * @intr_pos:	INT topa entry index
+ * @stop_te:	STOP topa entry pointer
+ * @intr_te:	INT topa entry pointer
 * @data_pages:	array of pages from perf
 * @topa_index:	table of topa entries indexed by page offset
 */
 struct pt_buffer {
-	int			cpu;
 	struct list_head	tables;
 	struct topa		*first, *last, *cur;
 	unsigned int		cur_idx;
@@ -80,9 +80,9 @@ struct pt_buffer {
 	local_t			data_size;
 	local64_t		head;
 	bool			snapshot;
-	unsigned long		stop_pos, intr_pos;
+	long			stop_pos, intr_pos;
+	struct topa_entry	*stop_te, *intr_te;
 	void			**data_pages;
-	struct topa_entry	*topa_index[0];
 };
 
 #define PT_FILTERS_NUM	4
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 64ab51ffdf06..5053a403e4ae 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -634,7 +634,7 @@ static void cleanup_rapl_pmus(void)
 	kfree(rapl_pmus);
 }
 
-const struct attribute_group *rapl_attr_update[] = {
+static const struct attribute_group *rapl_attr_update[] = {
 	&rapl_events_cores_group,
 	&rapl_events_pkg_group,
 	&rapl_events_ram_group,
@@ -720,27 +720,27 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,		model_snbep),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,		model_snb),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,		model_snbep),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,		model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL,		model_hsw),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,		model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,		model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,		model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,		model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,		model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L,		model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_G,		model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL,		model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G,		model_hsw),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,		model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D,	model_hsx),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_D,		model_hsx),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,		model_knl),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,		model_knl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,		model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,	model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L,		model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE,		model_skl),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,		model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE,	model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L,		model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE,		model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L,		model_skl),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,		model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X,	model_hsw),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_D,	model_hsw),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,		model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP,	model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,		model_skl),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE,		model_skl),
 	{},
 };
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 3694a5d0703d..6fc2e06ab4c6 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1451,29 +1451,29 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_G,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL,	  bdw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_G,	  bdw_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_D,	  bdx_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE,	  skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L,	  skl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  skx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI,	  icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L,	  skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE,	  skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,	  icl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI,	  icl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE,	  icl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init),
 	{},
 };