author		Tejun Heo <tj@kernel.org>	2024-02-21 08:36:14 +0300
committer	Tejun Heo <tj@kernel.org>	2024-02-21 08:36:14 +0300
commit		c26e2f2e2fcfb73996fa025a0d3b5695017d65b5 (patch)
tree		fa4497441e3a5754dbf4c2cefc2ea8fb66362af0 /kernel
parent		cdc6e4b329bc82676886a758a940b2b6987c2109 (diff)
workqueue: Use variable name irq_flags for saving local irq flags
Using the generic term `flags` for irq flags is conventional but can be
confusing, as there is quite a bit of code dealing with work flags, which
involves some subtleties. Let's use the more explicit name `irq_flags` for
local irq flags. No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
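
[Editor's note: to make the naming hazard concrete, here is a minimal sketch.
It is not part of the patch; example_mark_pending() is a hypothetical name
modeled on the queue_work_on() hunk below. A single function can touch both
the work item's own flag word and the CPU's saved irq state, and spelling
both "flags" obscures which is which.]

#include <linux/workqueue.h>

static bool example_mark_pending(struct work_struct *work)
{
	unsigned long irq_flags;	/* saved local irq state, not work flags */
	bool ret = false;

	/* irq_flags records the CPU's interrupt state across this section */
	local_irq_save(irq_flags);

	/*
	 * WORK_STRUCT_PENDING_BIT lives in the work item's own flag word
	 * (accessed via work_data_bits()); when both notions were spelled
	 * "flags", they were easy to conflate.
	 */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		ret = true;

	local_irq_restore(irq_flags);
	return ret;
}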
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	76
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 962061dca05c..b590d93d054b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2029,7 +2029,7 @@ out_put:
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
  * @is_dwork: @work is a delayed_work
- * @flags: place to store irq state
+ * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
  * stable state - idle, on timer or on worklist.
@@ -2051,17 +2051,17 @@ out_put:
  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
  *
  * On successful return, >= 0, irq is disabled and the caller is
- * responsible for releasing it using local_irq_restore(*@flags).
+ * responsible for releasing it using local_irq_restore(*@irq_flags).
  *
  * This function is safe to call from any context including IRQ handler.
  */
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
-			       unsigned long *flags)
+			       unsigned long *irq_flags)
 {
 	struct worker_pool *pool;
 	struct pool_workqueue *pwq;
 
-	local_irq_save(*flags);
+	local_irq_save(*irq_flags);
 
 	/* try to steal the timer if it exists */
 	if (is_dwork) {
@@ -2136,7 +2136,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	raw_spin_unlock(&pool->lock);
 fail:
 	rcu_read_unlock();
-	local_irq_restore(*flags);
+	local_irq_restore(*irq_flags);
 	if (work_is_canceling(work))
 		return -ENOENT;
 	cpu_relax();
@@ -2344,16 +2344,16 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 		   struct work_struct *work)
 {
 	bool ret = false;
-	unsigned long flags;
+	unsigned long irq_flags;
 
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_work(cpu, wq, work);
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
@@ -2410,7 +2410,7 @@ static int select_numa_node_cpu(int node)
 bool queue_work_node(int node, struct workqueue_struct *wq,
 		     struct work_struct *work)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	bool ret = false;
 
 	/*
@@ -2424,7 +2424,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
 	 */
 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
 
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		int cpu = select_numa_node_cpu(node);
@@ -2433,7 +2433,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_node);
@@ -2503,17 +2503,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 {
 	struct work_struct *work = &dwork->work;
 	bool ret = false;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/* read the comment in __queue_work() */
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
@@ -2539,16 +2539,16 @@ EXPORT_SYMBOL(queue_delayed_work_on);
 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			 struct delayed_work *dwork, unsigned long delay)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(&dwork->work, true, &flags);
+		ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (likely(ret >= 0)) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
-		local_irq_restore(flags);
+		local_irq_restore(irq_flags);
 	}
 
 	/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -4105,18 +4105,18 @@ EXPORT_SYMBOL(flush_rcu_work);
 
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &flags);
+		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (unlikely(ret < 0))
 		return false;
 
 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 
@@ -4137,11 +4137,11 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k
 static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 {
 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &flags);
+		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
 		/*
 		 * If someone else is already canceling, wait for it to
 		 * finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4175,7 +4175,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 
 	/*
 	 * Skip __flush_work() during early boot when we know that @work isn't
@@ -5381,15 +5381,15 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 	do {
 		activated = false;
 		for_each_pwq(pwq, wq) {
-			unsigned long flags;
+			unsigned long irq_flags;
 
 			/* can be called during early boot w/ irq disabled */
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+			raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
 			if (pwq_activate_first_inactive(pwq, true)) {
 				activated = true;
 				kick_pool(pwq->pool);
 			}
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+			raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
 		}
 	} while (activated);
 }
@@ -5762,7 +5762,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
 unsigned int work_busy(struct work_struct *work)
 {
 	struct worker_pool *pool;
-	unsigned long flags;
+	unsigned long irq_flags;
 	unsigned int ret = 0;
 
 	if (work_pending(work))
@@ -5771,10 +5771,10 @@ unsigned int work_busy(struct work_struct *work)
 	rcu_read_lock();
 	pool = get_work_pool(work);
 	if (pool) {
-		raw_spin_lock_irqsave(&pool->lock, flags);
+		raw_spin_lock_irqsave(&pool->lock, irq_flags);
 		if (find_worker_executing_work(pool, work))
 			ret |= WORK_BUSY_RUNNING;
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 	}
 	rcu_read_unlock();
 
@@ -6006,7 +6006,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq;
 	bool idle = true;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	for_each_pwq(pwq, wq) {
 		if (!pwq_is_empty(pwq)) {
@@ -6020,7 +6020,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
 	for_each_pwq(pwq, wq) {
-		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
 		if (!pwq_is_empty(pwq)) {
 			/*
 			 * Defer printing to avoid deadlocks in console
@@ -6031,7 +6031,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 			show_pwq(pwq);
 			printk_deferred_exit();
 		}
-		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
 		/*
 		 * We could be printing a lot from atomic context, e.g.
 		 * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -6050,10 +6050,10 @@ static void show_one_worker_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 	bool first = true;
-	unsigned long flags;
+	unsigned long irq_flags;
 	unsigned long hung = 0;
 
-	raw_spin_lock_irqsave(&pool->lock, flags);
+	raw_spin_lock_irqsave(&pool->lock, irq_flags);
 	if (pool->nr_workers == pool->nr_idle)
 		goto next_pool;
@@ -6081,7 +6081,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
 		pr_cont("\n");
 	printk_deferred_exit();
 next_pool:
-	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 	/*
 	 * We could be printing a lot from atomic context, e.g.
	 * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -7212,10 +7212,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 static void show_cpu_pool_hog(struct worker_pool *pool)
 {
 	struct worker *worker;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int bkt;
 
-	raw_spin_lock_irqsave(&pool->lock, flags);
+	raw_spin_lock_irqsave(&pool->lock, irq_flags);
 
 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
 		if (task_is_running(worker->task)) {
@@ -7233,7 +7233,7 @@ static void show_cpu_pool_hog(struct worker_pool *pool)
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 }
 
 static void show_cpu_pools_hogs(void)
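
[Editor's note: for readers skimming the hunks above, the contract the renamed
parameter documents is worth restating: a >= 0 return from
try_to_grab_pending() leaves local irqs disabled with the saved state in
*@irq_flags, and the caller must hand that state back to local_irq_restore().
The sketch below restates the __cancel_work() hunk; example_cancel() is a
hypothetical name, and since try_to_grab_pending() is static it would only
compile inside kernel/workqueue.c.]

static bool example_cancel(struct work_struct *work)
{
	unsigned long irq_flags;
	int ret;

	do {
		/* -EAGAIN is transient: retry; any other value is final */
		ret = try_to_grab_pending(work, false, &irq_flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;	/* grab failed; irqs were restored for us */

	/* success: we hold the PENDING grab with local irqs disabled */
	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
	local_irq_restore(irq_flags);	/* release the saved irq state */
	return ret;
}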