| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-07 22:32:19 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-07 22:32:19 +0300 |
| commit | bf7f1c7e2fdfe8b5050e8b3eebf111bf2ed1e8c9 (patch) | |
| tree | a5fee5e9af8289df87ebb5ae89e8369b318499f6 | |
| parent | 5b43f97f3f21c42ba738df2797930e32e05d5a25 (diff) | |
| parent | 8fc31ce8896fc3cea1d79688c8ff950ad4e73afe (diff) | |
| download | linux-bf7f1c7e2fdfe8b5050e8b3eebf111bf2ed1e8c9.tar.xz | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"A bogus warning fix, a counter width handling fix affecting certain
machines, plus a one-liner hw-enablement patch for Knights Mill CPUs"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Remove invalid warning from list_update_cgroup_event()
perf/x86: Fix full width counter, counter overflow
perf/x86/intel: Enable C-state residency events for Knights Mill
```
-rw-r--r--  arch/x86/events/core.c         |  2
-rw-r--r--  arch/x86/events/intel/core.c   |  2
-rw-r--r--  arch/x86/events/intel/cstate.c |  1
-rw-r--r--  kernel/events/core.c           | 19
```

4 files changed, 11 insertions, 13 deletions
```diff
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9d4bf3ab049e..6e395c996900 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -69,7 +69,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int shift = 64 - x86_pmu.cntval_bits;
 	u64 prev_raw_count, new_raw_count;
 	int idx = hwc->idx;
-	s64 delta;
+	u64 delta;
 
 	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a74a2dbc0180..cb8522290e6a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4034,7 +4034,7 @@ __init int intel_pmu_init(void)
 
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
-		x86_pmu.max_period = x86_pmu.cntval_mask;
+		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
 		x86_pmu.perfctr = MSR_IA32_PMC0;
 		pr_cont("full-width counters, ");
 	}
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4f5ac726335f..da51e5a3e2ff 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -540,6 +540,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6ee1febdf6ff..02c8421f8c01 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -903,17 +903,14 @@ list_update_cgroup_event(struct perf_event *event,
 	 */
 	cpuctx = __get_cpu_context(ctx);
 
-	/* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
-	if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
-		/*
-		 * We are removing the last cpu event in this context.
-		 * If that event is not active in this cpu, cpuctx->cgrp
-		 * should've been cleared by perf_cgroup_switch.
-		 */
-		WARN_ON_ONCE(!add && cpuctx->cgrp);
-		return;
-	}
-	cpuctx->cgrp = add ? event->cgrp : NULL;
+	/*
+	 * cpuctx->cgrp is NULL until a cgroup event is sched in or
+	 * ctx->nr_cgroup == 0 .
+	 */
+	if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
+		cpuctx->cgrp = event->cgrp;
+	else if (!add)
+		cpuctx->cgrp = NULL;
 }
 
 #else /* !CONFIG_CGROUP_PERF */
```
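
For illustration only, a minimal user-space C sketch of the delta computation that the "Fix full width counter, counter overflow" change addresses. It is not part of the patch: `CNTVAL_BITS`, `delta_u64()` and `delta_s64()` are hypothetical names chosen here, and a 48-bit counter width is assumed. It shows why a large counter advance goes wrong with a signed `delta` and an arithmetic right shift, and why the unsigned version used after the fix stays correct.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone model of the wraparound delta computation in
 * x86_perf_event_update(). Illustrative names only, not kernel symbols.
 * Assumes 48-bit general-purpose counters (cntval_bits == 48).
 */
#define CNTVAL_BITS 48
#define CNTVAL_MASK ((1ULL << CNTVAL_BITS) - 1)

/* After the patch: unsigned delta, logical right shift. */
static uint64_t delta_u64(uint64_t prev, uint64_t now)
{
	int shift = 64 - CNTVAL_BITS;
	/* Shift both samples so the counter MSB lands in bit 63; the
	 * subtraction then wraps correctly even if 'now' rolled over. */
	uint64_t delta = (now << shift) - (prev << shift);

	return delta >> shift;          /* logical shift: no sign extension */
}

/* Before the patch: signed delta, arithmetic right shift. */
static uint64_t delta_s64(uint64_t prev, uint64_t now)
{
	int shift = 64 - CNTVAL_BITS;
	int64_t delta = (int64_t)((now << shift) - (prev << shift));

	return (uint64_t)(delta >> shift);  /* sign-extends large deltas */
}

int main(void)
{
	/*
	 * With full-width writes and max_period == cntval_mask, the counter
	 * could advance by more than half its range between two reads.
	 */
	uint64_t prev = 0x1000;
	uint64_t now  = (prev + (CNTVAL_MASK >> 1) + 2) & CNTVAL_MASK;

	printf("u64 delta: 0x%llx\n",
	       (unsigned long long)delta_u64(prev, now));
	printf("s64 delta: 0x%llx (bogus, sign-extended)\n",
	       (unsigned long long)delta_s64(prev, now));
	return 0;
}
```

In this sketch the unsigned variant reports the true advance (0x800000000001) while the signed variant sign-extends it into a huge bogus value. The second half of the fix, capping `max_period` at `cntval_mask >> 1`, keeps any single advance below half the counter range so the shift trick remains valid.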