Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--  arch/powerpc/perf/8xx-pmu.c             |   2
-rw-r--r--  arch/powerpc/perf/core-book3s.c         | 100
-rw-r--r--  arch/powerpc/perf/generic-compat-pmu.c  |   2
-rw-r--r--  arch/powerpc/perf/hv-24x7.c             |   2
-rw-r--r--  arch/powerpc/perf/internal.h            |  18
-rw-r--r--  arch/powerpc/perf/isa207-common.c       |  60
-rw-r--r--  arch/powerpc/perf/power10-pmu.c         |   2
-rw-r--r--  arch/powerpc/perf/power5+-pmu.c         |   2
-rw-r--r--  arch/powerpc/perf/power5-pmu.c          |   2
-rw-r--r--  arch/powerpc/perf/power6-pmu.c          |   2
-rw-r--r--  arch/powerpc/perf/power7-pmu.c          |   2
-rw-r--r--  arch/powerpc/perf/power8-pmu.c          |   2
-rw-r--r--  arch/powerpc/perf/power9-pmu.c          |   2
-rw-r--r--  arch/powerpc/perf/ppc970-pmu.c          |   2
14 files changed, 163 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index f970d1510d3d..4738c4dbf567 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -153,7 +153,7 @@ static void mpc8xx_pmu_read(struct perf_event *event)
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
- struct ppc_inst insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));
+ ppc_inst_t insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));
mpc8xx_pmu_read(event);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 8d4ff93462fb..b5b42cf0a703 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -17,6 +17,7 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
@@ -775,6 +776,34 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
+/*
+ * If the perf subsystem wants performance monitor interrupts as soon as
+ * possible (e.g., to sample the instruction address and stack chain),
+ * this should return true. The IRQ masking code can then enable MSR[EE]
+ * in some places (e.g., interrupt handlers), which lets PMIs through
+ * to improve the accuracy of profiles, at the cost of some performance.
+ *
+ * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
+ * access), but in that case there is no need for prompt PMI handling.
+ *
+ * This currently returns true if any perf counter is being used. It
+ * could possibly return false if only events are being counted rather than
+ * samples being taken, but for now this is good enough.
+ */
+bool power_pmu_wants_prompt_pmi(void)
+{
+ struct cpu_hw_events *cpuhw;
+
+ /*
+ * This could simply test local_paca->pmcregs_in_use if that were not
+ * under ifdef KVM.
+ */
+ if (!ppmu)
+ return false;
+
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+ return cpuhw->n_events;
+}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
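
A minimal sketch, not part of this patch, of how soft-masked interrupt handling might consult the new helper; apart from power_pmu_wants_prompt_pmi() itself, every name here is hypothetical:

/*
 * Sketch: decide whether an interrupt handler should hard-enable
 * MSR[EE] so that a pending PMI is taken promptly.
 */
static inline bool want_prompt_pmi_sketch(void)
{
#ifdef CONFIG_PPC64
	/* Only worth the overhead when perf is actively counting. */
	return power_pmu_wants_prompt_pmi();
#else
	return false;
#endif
}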
@@ -857,6 +886,19 @@ static void write_pmc(int idx, unsigned long val)
}
}
+static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
+{
+ int i, idx;
+
+ for (i = 0; i < cpuhw->n_events; i++) {
+ idx = cpuhw->event[i]->hw.idx;
+ if (idx && (int)read_pmc(idx) < 0)
+ return idx;
+ }
+
+ return 0;
+}
+
/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
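
Background for the sign test in any_pmc_overflown(): core-book3s.c arms a sampling PMC at 0x80000000 - period, so the counter sets bit 31 exactly when the sample period elapses. A small illustration, with a made-up helper name:

/* Illustration: value loaded into a 32-bit PMC so it overflows after `period` events. */
static unsigned long pmc_arm_value_sketch(unsigned long period)
{
	/*
	 * Once the counter crosses 0x80000000 the sign bit is set,
	 * which is what (int)read_pmc(idx) < 0 detects above.
	 */
	return 0x80000000UL - period;
}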
@@ -1281,11 +1323,13 @@ static void power_pmu_disable(struct pmu *pmu)
/*
* Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
+ * Also clear PMXE to prevent PMIs being triggered in some
+ * corner cases during PMU disable.
*/
val = mmcr0 = mfspr(SPRN_MMCR0);
val |= MMCR0_FC;
val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
- MMCR0_FC56);
+ MMCR0_PMXE | MMCR0_FC56);
/* Set mmcr0 PMCCEXT for p10 */
if (ppmu->flags & PPMU_ARCH_31)
val |= MMCR0_PMCCEXT;
@@ -1299,6 +1343,34 @@ static void power_pmu_disable(struct pmu *pmu)
mb();
isync();
+ /*
+ * Some corner cases could clear the PMU counter overflow
+ * while a masked PMI is pending. One such case is when
+ * a PMI happens during interrupt replay and perf counter
+ * values are cleared by PMU callbacks before replay.
+ *
+ * If any PMC corresponding to the active PMU events has
+ * overflowed, discard the pending interrupt by clearing the
+ * paca bit for PMI, since we are disabling the PMU now.
+ * Otherwise warn if a PMI is pending but no counter is found
+ * overflown.
+ */
+ if (any_pmc_overflown(cpuhw)) {
+ /*
+ * Since power_pmu_disable runs under local_irq_save, it
+ * could happen that code hits a PMC overflow without a PMI
+ * pending in the paca. Hence only clear the pending PMI if it
+ * was set.
+ *
+ * If a PMI is pending, then MSR[EE] must be disabled (because
+ * the masked PMI handler disables EE). So it is safe to
+ * call clear_pmi_irq_pending().
+ */
+ if (pmi_irq_pending())
+ clear_pmi_irq_pending();
+ } else
+ WARN_ON(pmi_irq_pending());
+
val = mmcra = cpuhw->mmcr.mmcra;
/*
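
A condensed, hedged restatement of the disable-path ordering the two hunks above establish; this is not literal kernel code:

static void pmu_freeze_sketch(struct cpu_hw_events *cpuhw)
{
	unsigned long val = mfspr(SPRN_MMCR0);

	val |= MMCR0_FC;	/* freeze all counters */
	val &= ~MMCR0_PMXE;	/* and block new PMIs while frozen */
	mtspr(SPRN_MMCR0, val);
	mb();			/* order the freeze ... */
	isync();		/* ... ahead of the overflow check */

	/* Reconcile a PMI that was soft-masked before the freeze. */
	if (any_pmc_overflown(cpuhw)) {
		if (pmi_irq_pending())
			clear_pmi_irq_pending();
	} else {
		WARN_ON(pmi_irq_pending());
	}
}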
@@ -1390,6 +1462,15 @@ static void power_pmu_enable(struct pmu *pmu)
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
+ /*
+ * If there is any active event with an overflown PMC
+ * value, set back PACA_IRQ_PMI which would have been
+ * cleared in power_pmu_disable().
+ */
+ hard_irq_disable();
+ if (any_pmc_overflown(cpuhw))
+ set_pmi_irq_pending();
+
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
if (ppmu->flags & PPMU_ARCH_31)
@@ -2337,6 +2418,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
break;
}
}
+
+ /*
+ * Clear PACA_IRQ_PMI in case it was set by
+ * set_pmi_irq_pending() when PMU was enabled
+ * after accounting for interrupts.
+ */
+ clear_pmi_irq_pending();
+
if (!active)
/* reset non active counters that have overflowed */
write_pmc(i + 1, 0);
@@ -2356,6 +2445,13 @@ static void __perf_event_interrupt(struct pt_regs *regs)
}
}
}
+
+ /*
+ * During system-wide profiling, or while a specific CPU is monitored for an
+ * event, some corner cases can cause a PMC to overflow in the idle path. This
+ * triggers a PMI after waking up from idle. Since counter values are _not_
+ * saved/restored in the idle path, this can lead to the "Can't find PMC" message below.
+ */
if (unlikely(!found) && !arch_irq_disabled_regs(regs))
printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
@@ -2392,7 +2488,7 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
return 0;
}
-int register_power_pmu(struct power_pmu *pmu)
+int __init register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index 695975227e60..b6e25f75109d 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -307,7 +307,7 @@ static struct power_pmu generic_compat_pmu = {
.attr_groups = generic_compat_pmu_attr_groups,
};
-int init_generic_compat_pmu(void)
+int __init init_generic_compat_pmu(void)
{
int rc = 0;
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 1816f560a465..1e8aa934e37e 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -756,7 +756,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
}
if (calc_ev_end > ev_end) {
- pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
+ pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
event_idx, event, ev_end, offset, calc_ev_end);
return -1;
}
diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h
index 80bbf72bfec2..4c18b5504326 100644
--- a/arch/powerpc/perf/internal.h
+++ b/arch/powerpc/perf/internal.h
@@ -2,12 +2,12 @@
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
-extern int init_ppc970_pmu(void);
-extern int init_power5_pmu(void);
-extern int init_power5p_pmu(void);
-extern int init_power6_pmu(void);
-extern int init_power7_pmu(void);
-extern int init_power8_pmu(void);
-extern int init_power9_pmu(void);
-extern int init_power10_pmu(void);
-extern int init_generic_compat_pmu(void);
+int __init init_ppc970_pmu(void);
+int __init init_power5_pmu(void);
+int __init init_power5p_pmu(void);
+int __init init_power6_pmu(void);
+int __init init_power7_pmu(void);
+int __init init_power8_pmu(void);
+int __init init_power9_pmu(void);
+int __init init_power10_pmu(void);
+int __init init_generic_compat_pmu(void);
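
These prototypes drop the redundant extern (implicit for function declarations) and gain __init to match the definitions below, letting the kernel discard the code after boot. A hedged sketch, modeled on the boot-time probe in core-book3s.c, of how they are consumed:

/* Sketch: probe newest CPUs first, fall back to the generic compat PMU. */
static int __init init_ppc64_pmu_sketch(void)
{
	if (!init_power10_pmu())
		return 0;
	if (!init_power9_pmu())
		return 0;
	/* ... power8 down to ppc970 ... */
	return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu_sketch);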
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 7ea873ab2e6f..4037ea652522 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -220,22 +220,37 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
/* Nothing to do */
break;
case 1:
- ret = PH(LVL, L1);
+ ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT);
break;
case 2:
- ret = PH(LVL, L2);
+ ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
break;
case 3:
- ret = PH(LVL, L3);
+ ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
break;
case 4:
- if (sub_idx <= 1)
- ret = PH(LVL, LOC_RAM);
- else if (sub_idx > 1 && sub_idx <= 2)
- ret = PH(LVL, REM_RAM1);
- else
- ret = PH(LVL, REM_RAM2);
- ret |= P(SNOOP, HIT);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ ret = P(SNOOP, HIT);
+
+ if (sub_idx == 1)
+ ret |= PH(LVL, LOC_RAM) | LEVEL(RAM);
+ else if (sub_idx == 2 || sub_idx == 3)
+ ret |= P(LVL, HIT) | LEVEL(PMEM);
+ else if (sub_idx == 4)
+ ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);
+ else if (sub_idx == 5 || sub_idx == 7)
+ ret |= P(LVL, HIT) | LEVEL(PMEM) | REM;
+ else if (sub_idx == 6)
+ ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3);
+ } else {
+ if (sub_idx <= 1)
+ ret = PH(LVL, LOC_RAM);
+ else if (sub_idx > 1 && sub_idx <= 2)
+ ret = PH(LVL, REM_RAM1);
+ else
+ ret = PH(LVL, REM_RAM2);
+ ret |= P(SNOOP, HIT);
+ }
break;
case 5:
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
@@ -261,11 +276,26 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
}
break;
case 6:
- ret = PH(LVL, REM_CCE2);
- if ((sub_idx == 0) || (sub_idx == 2))
- ret |= P(SNOOP, HIT);
- else if ((sub_idx == 1) || (sub_idx == 3))
- ret |= P(SNOOP, HITM);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (sub_idx == 0)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 2);
+ else if (sub_idx == 1)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 2);
+ else if (sub_idx == 2)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 3);
+ else if (sub_idx == 3)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 3);
+ } else {
+ ret = PH(LVL, REM_CCE2);
+ if (sub_idx == 0 || sub_idx == 2)
+ ret |= P(SNOOP, HIT);
+ else if (sub_idx == 1 || sub_idx == 3)
+ ret |= P(SNOOP, HITM);
+ }
break;
case 7:
ret = PM(LVL, L1);
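
For reference, P()/PH()/PM()/LEVEL()/REM above are thin wrappers over perf's PERF_MEM_S() field encoder; the definitions below paraphrase the ones earlier in isa207-common.c (hedged, check the file for the authoritative forms). Each case simply ORs independent fields of the perf_mem_data_src word:

#define P(a, b)		PERF_MEM_S(a, b)		/* one encoded field */
#define PH(a, b)	(P(LVL, HIT) | P(a, b))		/* hit plus a field */
#define PM(a, b)	(P(LVL, MISS) | P(a, b))	/* miss plus a field */
#define LEVEL(x)	P(LVLNUM, x)			/* numeric cache level */
#define REM		P(REMOTE, REMOTE)		/* remote access */

/* e.g. sub_idx == 4 in case 4 above: a remote-RAM hit two hops away */
u64 src = PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);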
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 9dd75f385837..0975ad0b42c4 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -592,7 +592,7 @@ static struct power_pmu power10_pmu = {
.check_attr_config = power10_check_attr_config,
};
-int init_power10_pmu(void)
+int __init init_power10_pmu(void)
{
unsigned int pvr;
int rc;
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index 18732267993a..753b4740ef64 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -677,7 +677,7 @@ static struct power_pmu power5p_pmu = {
.cache_events = &power5p_cache_events,
};
-int init_power5p_pmu(void)
+int __init init_power5p_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index cb611c1e7abe..1f83c4cba0aa 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -618,7 +618,7 @@ static struct power_pmu power5_pmu = {
.flags = PPMU_HAS_SSLOT,
};
-int init_power5_pmu(void)
+int __init init_power5_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 69ef38216418..aec746f86804 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -539,7 +539,7 @@ static struct power_pmu power6_pmu = {
.cache_events = &power6_cache_events,
};
-int init_power6_pmu(void)
+int __init init_power6_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 894c17f9a762..99b5ba314ea7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -445,7 +445,7 @@ static struct power_pmu power7_pmu = {
.cache_events = &power7_cache_events,
};
-int init_power7_pmu(void)
+int __init init_power7_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 5282e8415ddf..f21194b5604a 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -378,7 +378,7 @@ static struct power_pmu power8_pmu = {
.bhrb_nr = 32,
};
-int init_power8_pmu(void)
+int __init init_power8_pmu(void)
{
int rc;
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index ff3382140d7e..4b7c17e36100 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -452,7 +452,7 @@ static struct power_pmu power9_pmu = {
.check_attr_config = power9_check_attr_config,
};
-int init_power9_pmu(void)
+int __init init_power9_pmu(void)
{
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 1f8263785286..09802482ba72 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -489,7 +489,7 @@ static struct power_pmu ppc970_pmu = {
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
-int init_ppc970_pmu(void)
+int __init init_ppc970_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")