Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--	kernel/irq_work.c	45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index eca83965b631..e8da1e71583a 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -31,10 +31,10 @@ static bool irq_work_claim(struct irq_work *work)
 {
 	int oflags;
 
-	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
 	/*
 	 * If the work is already pending, no need to raise the IPI.
-	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+	 * The pairing smp_mb() in irq_work_single() makes sure
 	 * everything we did before is visible.
 	 */
 	if (oflags & IRQ_WORK_PENDING)
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
 	/* If the work is "lazy", handle it from next tick if any */
-	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
 }
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		__smp_call_single_queue(cpu, &work->llnode);
+		__smp_call_single_queue(cpu, &work->node.llist);
 	} else {
 		__irq_work_queue_local(work);
 	}
@@ -136,23 +136,28 @@ void irq_work_single(void *arg)
 	int flags;
 
 	/*
-	 * Clear the PENDING bit, after this point the @work
-	 * can be re-used.
-	 * Make it immediately visible so that other CPUs trying
-	 * to claim that work don't rely on us to handle their data
-	 * while we are in the middle of the func.
+	 * Clear the PENDING bit, after this point the @work can be re-used.
+	 * The PENDING bit acts as a lock, and we own it, so we can clear it
+	 * without atomic ops.
 	 */
-	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+	flags = atomic_read(&work->node.a_flags);
+	flags &= ~IRQ_WORK_PENDING;
+	atomic_set(&work->node.a_flags, flags);
+
+	/*
+	 * See irq_work_claim().
+	 */
+	smp_mb();
 
-	lockdep_irq_work_enter(work);
+	lockdep_irq_work_enter(flags);
 	work->func(work);
-	lockdep_irq_work_exit(work);
+	lockdep_irq_work_exit(flags);
+
 	/*
-	 * Clear the BUSY bit and return to the free state if
-	 * no-one else claimed it meanwhile.
+	 * Clear the BUSY bit, if set, and return to the free state if no-one
+	 * else claimed it meanwhile.
 	 */
-	flags &= ~IRQ_WORK_PENDING;
-	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 }
 
 static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +171,7 @@ static void irq_work_run_list(struct llist_head *list)
 		return;
 
 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
 		irq_work_single(work);
 }
 
@@ -198,7 +203,7 @@ void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
 
-	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+	while (irq_work_is_busy(work))
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
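
For context on the field renames above (work->flags becoming work->node.a_flags, work->llnode becoming work->node.llist): the diff assumes struct irq_work now embeds a struct __call_single_node shared with the smp_call_function CSD machinery. The sketch below shows the layout this diff appears to rely on, plus the irq_work_is_busy() helper that the new irq_work_sync() loop calls; these definitions live in headers outside this diff, so treat them as an assumption rather than part of the change:

/* Assumed header-side layout behind this diff (sketch, not the diff itself). */
struct __call_single_node {
	struct llist_node	llist;		/* was irq_work::llnode */
	union {
		unsigned int	u_flags;
		atomic_t	a_flags;	/* was irq_work::flags */
	};
};

struct irq_work {
	struct __call_single_node node;
	void (*func)(struct irq_work *);
};

/* Presumed helper backing the new irq_work_sync() busy-wait. */
static inline bool irq_work_is_busy(struct irq_work *work)
{
	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}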
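The other substantive change is in irq_work_single(): the atomic_fetch_andnot() becomes a plain read/modify/atomic_set() followed by smp_mb(), which is safe because PENDING acts as a lock that the runner owns. Below is a minimal userspace C11 model of that claim/run protocol, using stdatomic.h stand-ins for the kernel primitives and illustrative flag values; it is a sketch of the ordering argument, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative flag values mirroring the PENDING/BUSY semantics. */
#define WORK_PENDING	(1 << 0)
#define WORK_BUSY	(1 << 1)
#define WORK_CLAIMED	(WORK_PENDING | WORK_BUSY)

static atomic_int a_flags;

/*
 * Claim side: the fetch_or is a full barrier, so everything the claimer
 * wrote before claiming is visible to whoever later observes PENDING.
 */
static bool claim(void)
{
	int oflags = atomic_fetch_or(&a_flags, WORK_CLAIMED);

	/* Already pending: the current runner will handle our data. */
	return !(oflags & WORK_PENDING);
}

/*
 * Run side: we own PENDING, so clearing it needs no atomic RMW; the
 * fence pairs with the full barrier in claim().
 */
static void run_single(void (*func)(void))
{
	int flags = atomic_load_explicit(&a_flags, memory_order_relaxed);

	flags &= ~WORK_PENDING;
	atomic_store_explicit(&a_flags, flags, memory_order_relaxed);

	atomic_thread_fence(memory_order_seq_cst);	/* pairs with claim() */

	func();

	/* Drop BUSY only if no one re-claimed the work meanwhile. */
	atomic_compare_exchange_strong(&a_flags, &flags, flags & ~WORK_BUSY);
}

If a claimer races with run_single(), either its fetch_or happens after the PENDING clear (it wins the claim and the final compare-exchange fails, leaving the flags to the new owner), or it observes PENDING still set and the fence guarantees the runner sees the claimer's earlier writes when func() runs.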