author     Yanfei Xu <yanfei.xu@windriver.com>    2021-10-13 16:41:52 +0300
committer  Peter Zijlstra <peterz@infradead.org>  2021-10-19 18:27:06 +0300
commit     6c2787f2a20ceb49c98bd06f7dad1589eed1c951 (patch)
tree       087af925e2290780f932b1b960ae47beb1db663f /kernel
parent     7cdacc5f52d68a9370f182c844b5b3e6cc975cc1 (diff)
locking: Remove rcu_read_{,un}lock() for preempt_{dis,en}able()
A preempt_disable/enable() pair is equivalent to an RCU read-side
critical section, and the optimistic-spinning code in mutex and rwsem
already ensures that preemption is disabled. So remove the unnecessary
rcu_read_lock/unlock() calls to save some cycles in these hot paths.
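
For illustration, here is a minimal sketch of the lifetime argument (not
part of this patch; struct my_lock and my_spin_on_owner() are made-up
names): the owner's task_struct is only freed after an RCU grace period
(via delayed_put_task_struct()), and since the RCU flavors were
consolidated (around v4.20) a preempt-disabled region holds off that
grace period just as rcu_read_lock() would.

#include <linux/sched.h>
#include <linux/lockdep.h>

/* Made-up lock type, for illustration only. */
struct my_lock {
        struct task_struct *owner;
};

/*
 * Spin while @owner still holds @lock.  The caller must have disabled
 * preemption; that alone keeps the task_struct from being freed, so no
 * rcu_read_lock() is needed.
 */
static bool my_spin_on_owner(struct my_lock *lock, struct task_struct *owner)
{
        bool ret = true;

        lockdep_assert_preemption_disabled();

        while (READ_ONCE(lock->owner) == owner) {
                /* Re-check lock->owner before dereferencing owner. */
                barrier();

                if (!owner->on_cpu || need_resched()) {
                        ret = false;
                        break;
                }
                cpu_relax();
        }

        return ret;
}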
Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20211013134154.1085649-2-yanfei.xu@windriver.com
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/locking/mutex.c    22
-rw-r--r--    kernel/locking/rwsem.c    14
2 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 2fede72b6af5..db1913611192 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -351,13 +351,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 {
 	bool ret = true;
 
-	rcu_read_lock();
+	lockdep_assert_preemption_disabled();
+
 	while (__mutex_owner(lock) == owner) {
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
-		 * checking lock->owner still matches owner. If that fails,
-		 * owner might point to freed memory. If it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * checking lock->owner still matches owner. We already
+		 * disabled preemption, which is equivalent to an RCU
+		 * read-side critical section in the optimistic spinning
+		 * code, so the task_struct cannot go away during the
+		 * spinning period.
 		 */
 		barrier();
 
@@ -377,7 +380,6 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return ret;
 }
@@ -390,19 +392,25 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	struct task_struct *owner;
 	int retval = 1;
 
+	lockdep_assert_preemption_disabled();
+
 	if (need_resched())
 		return 0;
 
-	rcu_read_lock();
+	/*
+	 * We already disabled preemption, which is equivalent to an RCU
+	 * read-side critical section in the optimistic spinning code, so
+	 * the task_struct cannot go away during the spinning period.
+	 */
 	owner = __mutex_owner(lock);
 
 	/*
 	 * As lock holder preemption issue, we both skip spinning if task is not
 	 * on cpu or its cpu is preempted
 	 */
+
 	if (owner)
 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-	rcu_read_unlock();
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 29eea50a3e67..884aa08e0624 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -635,7 +635,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	}
 
 	preempt_disable();
-	rcu_read_lock();
+	/*
+	 * Disabled preemption is equivalent to an RCU read-side critical
+	 * section, so the task_struct cannot go away.
+	 */
 	owner = rwsem_owner_flags(sem, &flags);
 	/*
 	 * Don't check the read-owner as the entry may be stale.
@@ -643,7 +646,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	rcu_read_unlock();
 	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
@@ -671,12 +673,13 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 	unsigned long flags, new_flags;
 	enum owner_state state;
 
+	lockdep_assert_preemption_disabled();
+
 	owner = rwsem_owner_flags(sem, &flags);
 	state = rwsem_owner_state(owner, flags);
 	if (state != OWNER_WRITER)
 		return state;
 
-	rcu_read_lock();
 	for (;;) {
 		/*
 		 * When a waiting writer set the handoff flag, it may spin
@@ -694,7 +697,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking sem->owner still matches owner, if that fails,
 		 * owner might point to free()d memory, if it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * our spinning context has already disabled preemption,
+		 * which, like an RCU read-side critical section, ensures
+		 * the memory stays valid.
 		 */
 		barrier();
 
@@ -705,7 +710,6 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return state;
 }
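
As a usage note, here is a hedged sketch of the caller-side contract the
asserts above rely on (illustrative only; my_optimistic_spin() is a
made-up name, reusing the my_spin_on_owner() helper sketched in the
commit message):

/*
 * The spin helpers now only assert that preemption is off, so the
 * optimistic-spin entry point is expected to establish it.
 */
static bool my_optimistic_spin(struct my_lock *lock)
{
        struct task_struct *owner;
        bool ret = true;

        preempt_disable();      /* doubles as an RCU read-side critical section */
        owner = READ_ONCE(lock->owner);
        if (owner)
                ret = my_spin_on_owner(lock, owner);
        preempt_enable();

        return ret;
}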