From 6f8b79683dfb37ee0661cf4c13a72f024c29f65c Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Tue, 19 Nov 2024 12:42:34 +0200
Subject: genirq: Move irq_thread_fn() further up in the code

In preparation for reusing irq_thread_fn(), move it further up in the
code. No functional change intended.

Signed-off-by: Andy Shevchenko
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20241119104339.2112455-2-andriy.shevchenko@linux.intel.com
---
 kernel/irq/manage.c | 35 ++++++++++++++++-------------------
 1 file changed, 16 insertions(+), 19 deletions(-)

(limited to 'kernel/irq/manage.c')

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f0803d6bd296..230f4701f18e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1182,45 +1182,42 @@ out_unlock:
 }
 
 /*
- * Interrupts which are not explicitly requested as threaded
- * interrupts rely on the implicit bh/preempt disable of the hard irq
- * context. So we need to disable bh here to avoid deadlocks and other
- * side effects.
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
  */
-static irqreturn_t
-irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
-	irqreturn_t ret;
+	irqreturn_t ret = action->thread_fn(action->irq, action->dev_id);
 
-	local_bh_disable();
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_disable();
-	ret = action->thread_fn(action->irq, action->dev_id);
 	if (ret == IRQ_HANDLED)
 		atomic_inc(&desc->threads_handled);
 
 	irq_finalize_oneshot(desc, action);
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_enable();
-	local_bh_enable();
 	return ret;
 }
 
 /*
- * Interrupts explicitly requested as threaded interrupts want to be
- * preemptible - many of them need to sleep and wait for slow busses to
- * complete.
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
  */
-static irqreturn_t irq_thread_fn(struct irq_desc *desc,
-				 struct irqaction *action)
+static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t ret;
 
+	local_bh_disable();
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	if (ret == IRQ_HANDLED)
 		atomic_inc(&desc->threads_handled);
 
 	irq_finalize_oneshot(desc, action);
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_enable();
+	local_bh_enable();
 	return ret;
 }
--
cgit v1.2.3

From 429f49ad361cd999ca221d8b562ae2552b7c3e2c Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Tue, 19 Nov 2024 12:42:35 +0200
Subject: genirq: Reuse irq_thread_fn() for forced thread case

irq_forced_thread_fn() uses the same action callback as the non-forced
variant but with different locking decorations. Reuse irq_thread_fn()
here to make that clear.
Signed-off-by: Andy Shevchenko
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20241119104339.2112455-3-andriy.shevchenko@linux.intel.com
---
 kernel/irq/manage.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'kernel/irq/manage.c')

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 230f4701f18e..f300bb6be3bd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1210,11 +1210,7 @@ static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction
 	local_bh_disable();
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_disable();
-	ret = action->thread_fn(action->irq, action->dev_id);
-	if (ret == IRQ_HANDLED)
-		atomic_inc(&desc->threads_handled);
-
-	irq_finalize_oneshot(desc, action);
+	ret = irq_thread_fn(desc, action);
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_enable();
 	local_bh_enable();
--
cgit v1.2.3
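
For reference, the net effect of the two patches above is that irq_forced_thread_fn()
becomes a thin locking wrapper around irq_thread_fn(). The following is a condensed
sketch reconstructed from the hunks above, not a standalone compilable unit, since it
relies on kernel-internal types and helpers (struct irq_desc, irq_finalize_oneshot(),
threads_handled accounting, etc.):

static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	/* Invoke the driver's threaded handler and account a handled interrupt. */
	irqreturn_t ret = action->thread_fn(action->irq, action->dev_id);

	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	/*
	 * Forced-threaded handlers expect the implicit bh/irq disable of
	 * hard interrupt context, so wrap the common helper in the
	 * corresponding locking decorations.
	 */
	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = irq_thread_fn(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

Keeping the locking only in the forced variant preserves the previous behavior of both
paths while removing the duplicated handler invocation, accounting and oneshot
finalization.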