From f6f5a30c834135c9f2fa10400c59ebbdd9188567 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 6 Mar 2012 17:34:50 +0100
Subject: ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers

The PMU IRQ handlers in perf assume that if a counter has overflowed
then perf must be responsible. In the paranoid world of crazy hardware,
this could be false, so check that we do have a valid event before
attempting to dereference NULL in the interrupt path.

Cc:
Signed-off-by: Ming Lei
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event_v6.c     | 20 ++------------------
 arch/arm/kernel/perf_event_v7.c     |  4 ++++
 arch/arm/kernel/perf_event_xscale.c |  6 ++++++
 3 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 88bf15253fbd..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 050cc8bf7246..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -960,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 831e019b017c..a5bbd360cc4b 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,6 +255,9 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -592,6 +595,9 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
--
cgit v1.2.3
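
For reference, the pattern this patch hardens is a per-counter loop in a shared
overflow interrupt: the handler walks every counter slot and must skip slots that
have no event attached before it touches the event pointer. The following is a
minimal, self-contained sketch of that pattern only; it is not kernel code, and
all names in it (fake_event, fake_cpu_events, counter_has_overflowed,
handle_overflow, NUM_COUNTERS) are invented for illustration.

/*
 * Sketch of a shared-overflow-IRQ counter loop with the NULL-event guard.
 * Everything here is made up for illustration; none of these names are
 * kernel or perf APIs.
 */
#include <stdio.h>
#include <stddef.h>

#define NUM_COUNTERS 4

struct fake_event {
	const char *name;
	unsigned long count;
};

struct fake_cpu_events {
	/* A slot is NULL when no event is scheduled on that counter. */
	struct fake_event *events[NUM_COUNTERS];
};

/* Treat overflow_flags as a bitmask with one overflow bit per counter. */
static int counter_has_overflowed(unsigned long overflow_flags, int idx)
{
	return overflow_flags & (1UL << idx);
}

static void handle_overflow(struct fake_event *event)
{
	event->count++;
	printf("overflow handled for %s (count=%lu)\n", event->name, event->count);
}

static void fake_pmu_irq_handler(struct fake_cpu_events *cpuc,
				 unsigned long overflow_flags)
{
	int idx;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		struct fake_event *event = cpuc->events[idx];

		/*
		 * The check the patch adds: hardware may flag an overflow on
		 * a counter that perf never scheduled an event on, so bail
		 * out before dereferencing a NULL slot.
		 */
		if (!event)
			continue;

		/* One interrupt covers all counters; check each one. */
		if (!counter_has_overflowed(overflow_flags, idx))
			continue;

		handle_overflow(event);
	}
}

int main(void)
{
	struct fake_event cycles = { "cycles", 0 };
	struct fake_cpu_events cpuc = { { NULL, &cycles, NULL, NULL } };

	/* Counters 1 and 2 report overflow, but only counter 1 has an event. */
	fake_pmu_irq_handler(&cpuc, (1UL << 1) | (1UL << 2));
	return 0;
}

Without the guard, the made-up handler above would dereference the NULL slot for
counter 2 exactly as the pre-patch handlers could when "paranoid" hardware raised
an overflow on an unused counter.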