author     Rabin Vincent <rabinv@axis.com>        2017-04-05 16:14:08 +0300
committer  Ralf Baechle <ralf@linux-mips.org>     2017-04-10 13:25:07 +0300
commit     f2b42866b1bb30c1f62a84f6d829b8789495da95 (patch)
tree       741b9f42bcdba9b7453e693eded39d9afe774468 /arch
parent     9eec1c01a073fe1fdba4e50e53bd953d1c59ad00 (diff)
download   linux-f2b42866b1bb30c1f62a84f6d829b8789495da95.tar.xz
MIPS: perf: fix deadlock
mipsxx_pmu_handle_shared_irq() calls irq_work_run() while holding the pmuint_rwlock for read. irq_work_run() can, via perf_pending_event(), call try_to_wake_up(), which can try to take rq->lock.

However, perf can also call perf_pmu_enable() (and thus take the pmuint_rwlock for write) while holding the rq->lock, from finish_task_switch() via perf_event_context_sched_in().

This leads to an ABBA deadlock:

 PID: 3855   TASK: 8f7ce288   CPU: 2   COMMAND: "process"
  #0 [89c39ac8] __delay at 803b5be4
  #1 [89c39ac8] do_raw_spin_lock at 8008fdcc
  #2 [89c39af8] try_to_wake_up at 8006e47c
  #3 [89c39b38] pollwake at 8018eab0
  #4 [89c39b68] __wake_up_common at 800879f4
  #5 [89c39b98] __wake_up at 800880e4
  #6 [89c39bc8] perf_event_wakeup at 8012109c
  #7 [89c39be8] perf_pending_event at 80121184
  #8 [89c39c08] irq_work_run_list at 801151f0
  #9 [89c39c38] irq_work_run at 80115274
 #10 [89c39c50] mipsxx_pmu_handle_shared_irq at 8002cc7c

 PID: 1481   TASK: 8eaac6a8   CPU: 3   COMMAND: "process"
  #0 [8de7f900] do_raw_write_lock at 800900e0
  #1 [8de7f918] perf_event_context_sched_in at 80122310
  #2 [8de7f938] __perf_event_task_sched_in at 80122608
  #3 [8de7f958] finish_task_switch at 8006b8a4
  #4 [8de7f998] __schedule at 805e4dc4
  #5 [8de7f9f8] schedule at 805e5558
  #6 [8de7fa10] schedule_hrtimeout_range_clock at 805e9984
  #7 [8de7fa70] poll_schedule_timeout at 8018e8f8
  #8 [8de7fa88] do_select at 8018f338
  #9 [8de7fd88] core_sys_select at 8018f5cc
 #10 [8de7fee0] sys_select at 8018f854
 #11 [8de7ff28] syscall_common at 80028fc8

The lock seems to be there to protect the hardware counters, so there is no need to hold it across irq_work_run().

Signed-off-by: Rabin Vincent <rabinv@axis.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
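The commit message describes a textbook ABBA ordering problem. The sketch below is only an illustration, not kernel code: it models the two paths with POSIX threads, using a pthread rwlock in place of pmuint_rwlock and a mutex in place of rq->lock (the names and the BUGGY_ORDERING switch are made up for this example). With BUGGY_ORDERING set to 1 the two threads can deadlock in the same way as the backtraces above; with 0, which mirrors the patched ordering (drop the PMU lock before running the deferred wakeup work), the program always completes.

/*
 * Illustrative userspace sketch of the ABBA pattern above -- not kernel
 * code.  Build with:  cc -std=gnu99 -o abba abba.c -lpthread
 */
#define BUGGY_ORDERING 0	/* 1 = pre-patch ordering, may deadlock */

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pmuint_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models mipsxx_pmu_handle_shared_irq(): read the counters under the
 * rwlock, then run deferred work that needs rq_lock (try_to_wake_up()). */
static void *pmu_irq_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_rwlock_rdlock(&pmuint_rwlock);	/* lock A (read) */
		/* ... handle hardware counters ... */
#if !BUGGY_ORDERING
		pthread_rwlock_unlock(&pmuint_rwlock);	/* patched: drop A first */
#endif
		pthread_mutex_lock(&rq_lock);		/* irq_work_run() -> wakeup */
		pthread_mutex_unlock(&rq_lock);
#if BUGGY_ORDERING
		pthread_rwlock_unlock(&pmuint_rwlock);	/* buggy: A held across B */
#endif
	}
	return NULL;
}

/* Models finish_task_switch() -> perf_event_context_sched_in(): rq_lock
 * is already held, then the PMU lock is taken for write. */
static void *sched_in_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&rq_lock);		/* lock B */
		pthread_rwlock_wrlock(&pmuint_rwlock);	/* then A: ABBA cycle */
		pthread_rwlock_unlock(&pmuint_rwlock);
		pthread_mutex_unlock(&rq_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, pmu_irq_thread, NULL);
	pthread_create(&b, NULL, sched_in_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock");
	return 0;
}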
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 8c35b3152e1e..9452b02ce079 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1446,6 +1446,11 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	HANDLE_COUNTER(0)
 	}
 
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+	read_unlock(&pmuint_rwlock);
+#endif
+	resume_local_counters();
+
 	/*
 	 * Do all the work for the pending perf events. We can do this
 	 * in here because the performance counter interrupt is a regular
@@ -1454,10 +1459,6 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	if (handled == IRQ_HANDLED)
 		irq_work_run();
 
-#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
-	read_unlock(&pmuint_rwlock);
-#endif
-	resume_local_counters();
 	return handled;
 }
 
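For readability, here is a sketch of how the tail of mipsxx_pmu_handle_shared_irq() reads with the patch applied, stitched together from the hunk context above; the elided middle of the comment ("...") and anything between the two hunks are not reproduced here.

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);	/* the rwlock only guards the counters ... */
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * ...
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();		/* ... so it is no longer held here */

	return handled;
}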