Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5568018222d9..5e6f5f5750a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -211,12 +211,12 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 
 	for (;;) {
 		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
+		raw_spin_rq_lock(rq);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
 			rq_pin_lock(rq, rf);
 			return rq;
 		}
-		raw_spin_unlock(&rq->lock);
+		raw_spin_rq_unlock(rq);
 
 		while (unlikely(task_on_rq_migrating(p)))
 			cpu_relax();
@@ -235,7 +235,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	for (;;) {
 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
+		raw_spin_rq_lock(rq);
 		/*
 		 *	move_queued_task()		task_rq_lock()
 		 *
@@ -257,7 +257,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 			rq_pin_lock(rq, rf);
 			return rq;
 		}
-		raw_spin_unlock(&rq->lock);
+		raw_spin_rq_unlock(rq);
 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 
 		while (unlikely(task_on_rq_migrating(p)))
@@ -327,7 +327,7 @@ void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
 		return;
@@ -625,7 +625,7 @@ void resched_curr(struct rq *rq)
 	struct task_struct *curr = rq->curr;
 	int cpu;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	if (test_tsk_need_resched(curr))
 		return;
@@ -649,10 +649,10 @@ void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_rq_lock_irqsave(rq, flags);
 	if (cpu_online(cpu) || cpu == smp_processor_id())
 		resched_curr(rq);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_rq_unlock_irqrestore(rq, flags);
 }
 
 #ifdef CONFIG_SMP
@@ -1151,7 +1151,7 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
 	struct uclamp_bucket *bucket;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	/* Update task effective clamp */
 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
@@ -1191,7 +1191,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	unsigned int bkt_clamp;
 	unsigned int rq_clamp;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	/*
 	 * If sched_uclamp_used was enabled after task @p was enqueued,
@@ -1864,7 +1864,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 				   struct task_struct *p, int new_cpu)
 {
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, new_cpu);
@@ -2038,7 +2038,7 @@ int push_cpu_stop(void *arg)
 	struct task_struct *p = arg;
 
 	raw_spin_lock_irq(&p->pi_lock);
-	raw_spin_lock(&rq->lock);
+	raw_spin_rq_lock(rq);
 
 	if (task_rq(p) != rq)
 		goto out_unlock;
@@ -2068,7 +2068,7 @@ int push_cpu_stop(void *arg)
 
 out_unlock:
 	rq->push_busy = false;
-	raw_spin_unlock(&rq->lock);
+	raw_spin_rq_unlock(rq);
 	raw_spin_unlock_irq(&p->pi_lock);
 
 	put_task_struct(p);
@@ -2121,7 +2121,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
 		 * Because __kthread_bind() calls this on blocked tasks without
 		 * holding rq->lock.
 		 */
-		lockdep_assert_held(&rq->lock);
+		lockdep_assert_rq_held(rq);
 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 	}
 	if (running)
@@ -2462,7 +2462,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	 * task_rq_lock().
 	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-				      lockdep_is_held(&task_rq(p)->lock)));
+				      lockdep_is_held(rq_lockp(task_rq(p)))));
 #endif
 	/*
 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
@@ -3004,7 +3004,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 {
 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	if (p->sched_contributes_to_load)
 		rq->nr_uninterruptible--;
@@ -4015,7 +4015,7 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
 	void (*func)(struct rq *rq);
 	struct callback_head *next;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	while (head) {
 		func = (void (*)(struct rq *))head->func;
@@ -4038,7 +4038,7 @@ static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
 {
 	struct callback_head *head = rq->balance_callback;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	if (head)
 		rq->balance_callback = NULL;
@@ -4055,9 +4055,9 @@ static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
 	unsigned long flags;
 
 	if (unlikely(head)) {
-		raw_spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_rq_lock_irqsave(rq, flags);
 		do_balance_callbacks(rq, head);
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_rq_unlock_irqrestore(rq, flags);
 	}
 }
 
@@ -4088,10 +4088,10 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
 	 * do an early lockdep release here:
 	 */
 	rq_unpin_lock(rq, rf);
-	spin_release(&rq->lock.dep_map, _THIS_IP_);
+	spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = next;
+	rq_lockp(rq)->owner = next;
 #endif
 }
 
@@ -4102,9 +4102,9 @@ static inline void finish_lock_switch(struct rq *rq)
 	 * fix up the runqueue lock - which gets 'carried over' from
 	 * prev into current:
 	 */
-	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+	spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
 	__balance_callbacks(rq);
-	raw_spin_unlock_irq(&rq->lock);
+	raw_spin_rq_unlock_irq(rq);
 }
 
 /*
@@ -5164,7 +5164,7 @@ static void __sched notrace __schedule(bool preempt)
 
 		rq_unpin_lock(rq, &rf);
 		__balance_callbacks(rq);
-		raw_spin_unlock_irq(&rq->lock);
+		raw_spin_rq_unlock_irq(rq);
 	}
 }
 
@@ -5706,7 +5706,7 @@ out_unlock:
 
 	rq_unpin_lock(rq, &rf);
 	__balance_callbacks(rq);
-	raw_spin_unlock(&rq->lock);
+	raw_spin_rq_unlock(rq);
 
 	preempt_enable();
 }
@@ -7456,7 +7456,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(0, idle);
 
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
-	raw_spin_lock(&rq->lock);
+	raw_spin_rq_lock(rq);
 
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
@@ -7494,7 +7494,7 @@ void init_idle(struct task_struct *idle, int cpu)
 #ifdef CONFIG_SMP
 	idle->on_cpu = 1;
 #endif
-	raw_spin_unlock(&rq->lock);
+	raw_spin_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
@@ -7660,7 +7660,7 @@ static void balance_push(struct rq *rq)
 {
 	struct task_struct *push_task = rq->curr;
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 	SCHED_WARN_ON(rq->cpu != smp_processor_id());
 
 	/*
@@ -7698,9 +7698,9 @@ static void balance_push(struct rq *rq)
 		 */
 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
 		    rcuwait_active(&rq->hotplug_wait)) {
-			raw_spin_unlock(&rq->lock);
+			raw_spin_rq_unlock(rq);
 			rcuwait_wake_up(&rq->hotplug_wait);
-			raw_spin_lock(&rq->lock);
+			raw_spin_rq_lock(rq);
 		}
 		return;
 	}
@@ -7710,7 +7710,7 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
	 * Both preemption and IRQs are still disabled.
 	 */
-	raw_spin_unlock(&rq->lock);
+	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
 	/*
@@ -7718,7 +7718,7 @@ static void balance_push(struct rq *rq)
 	 * schedule(). The next pick is obviously going to be the stop task
 	 * which kthread_is_per_cpu() and will push this task away.
 	 */
-	raw_spin_lock(&rq->lock);
+	raw_spin_rq_lock(rq);
 }
 
 static void balance_push_set(int cpu, bool on)
@@ -8008,7 +8008,7 @@ static void dump_rq_tasks(struct rq *rq, const char *loglvl)
 	struct task_struct *g, *p;
 	int cpu = cpu_of(rq);
 
-	lockdep_assert_held(&rq->lock);
+	lockdep_assert_rq_held(rq);
 
 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
 	for_each_process_thread(g, p) {
@@ -8181,7 +8181,7 @@ void __init sched_init(void)
 		struct rq *rq;
 
 		rq = cpu_rq(i);
-		raw_spin_lock_init(&rq->lock);
+		raw_spin_lock_init(&rq->__lock);
 		rq->nr_running = 0;
 		rq->calc_load_active = 0;
 		rq->calc_load_update = jiffies + LOAD_FREQ;
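Note: the rq_lockp(), raw_spin_rq_*() and lockdep_assert_rq_held() helpers that the new call sites rely on are defined outside this file (presumably in kernel/sched/sched.h by the same series, which this page does not show). A minimal sketch of what such wrappers could look like, assuming they simply forward to the renamed rq->__lock field seen in the sched_init() hunk above; the real definitions in the tree may differ:

/* Sketch only: assumed wrapper definitions, inferred from the call sites above. */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;			/* rq->lock renamed to rq->__lock */
}

static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_lock(rq_lockp(rq));
}

static inline void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_unlock_irq(rq_lockp(rq));
}

#define raw_spin_rq_lock_irqsave(rq, flags)		\
	raw_spin_lock_irqsave(rq_lockp(rq), flags)

#define raw_spin_rq_unlock_irqrestore(rq, flags)	\
	raw_spin_unlock_irqrestore(rq_lockp(rq), flags)

#define lockdep_assert_rq_held(rq)	lockdep_assert_held(rq_lockp(rq))

Routing every access through a single rq_lockp() accessor gives one indirection point that later patches can repoint at a different lock without having to touch these call sites again.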