author     Peter Zijlstra <peterz@infradead.org>    2017-06-07 18:51:27 +0300
committer  Ingo Molnar <mingo@kernel.org>           2017-08-10 13:29:03 +0300
commit     a9668cd6ee288c4838bc668880ac085be551cac2 (patch)
tree       5a4b71cef5d9f8acf8f2c67c4c518146b1c49a51 /fs
parent     d89e588ca4081615216cc25f2489b0281ac0bfe9 (diff)
download   linux-a9668cd6ee288c4838bc668880ac085be551cac2.tar.xz
locking: Remove smp_mb__before_spinlock()
Now that there are no users of smp_mb__before_spinlock() left, remove
it entirely.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
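For reference (not part of this patch): as far as I recall, the generic fallback for the removed helper in include/linux/spinlock.h was only a write barrier, which is part of why the series replaced it with something better defined. A sketch of the old definition, reconstructed from memory:

/*
 * Sketch of the removed helper, reconstructed from memory -- not taken
 * from this diff.  The generic fallback only guaranteed that a STORE
 * issued before it could not leak into the critical section of the
 * spin_lock() that followed; architectures could override it with a
 * stronger barrier.
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif

Callers that need a full barrier tied to lock acquisition are instead expected to use smp_mb__after_spinlock(), introduced by the parent commit (d89e588ca408), or to rely on an implicit smp_mb() already present in the path, as the updated userfaultfd comment below does with try_to_wake_up().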
Diffstat (limited to 'fs')
-rw-r--r--  fs/userfaultfd.c  25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 06ea26b8c996..44fcbefd84a2 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -109,27 +109,24 @@ static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
goto out;
WRITE_ONCE(uwq->waken, true);
/*
- * The implicit smp_mb__before_spinlock in try_to_wake_up()
- * renders uwq->waken visible to other CPUs before the task is
- * waken.
+ * The Program-Order guarantees provided by the scheduler
+ * ensure uwq->waken is visible before the task is woken.
*/
ret = wake_up_state(wq->private, mode);
- if (ret)
+ if (ret) {
/*
* Wake only once, autoremove behavior.
*
- * After the effect of list_del_init is visible to the
- * other CPUs, the waitqueue may disappear from under
- * us, see the !list_empty_careful() in
- * handle_userfault(). try_to_wake_up() has an
- * implicit smp_mb__before_spinlock, and the
- * wq->private is read before calling the extern
- * function "wake_up_state" (which in turns calls
- * try_to_wake_up). While the spin_lock;spin_unlock;
- * wouldn't be enough, the smp_mb__before_spinlock is
- * enough to avoid an explicit smp_mb() here.
+ * After the effect of list_del_init is visible to the other
+ * CPUs, the waitqueue may disappear from under us, see the
+ * !list_empty_careful() in handle_userfault().
+ *
+ * try_to_wake_up() has an implicit smp_mb(), and the
+ * wq->private is read before calling the extern function
+ * "wake_up_state" (which in turns calls try_to_wake_up).
*/
list_del_init(&wq->entry);
+ }
out:
return ret;
}
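The ordering the rewritten comment relies on can be illustrated outside the kernel. Below is a small standalone C11/pthreads analogy (my own construction, not kernel code): the relaxed store to the waken flag stands in for WRITE_ONCE(uwq->waken, true), the explicit full fence stands in for the smp_mb() implied by try_to_wake_up(), and the condition variable stands in for wake_up_state()/schedule(). In the pthreads version the mutex hand-off already guarantees visibility on its own, so the fence is there purely to mirror the kernel pattern.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool waken;			/* analogue of uwq->waken */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

static void *waker(void *arg)
{
	/* analogue of WRITE_ONCE(uwq->waken, true) */
	atomic_store_explicit(&waken, true, memory_order_relaxed);

	/*
	 * analogue of the implicit smp_mb() in try_to_wake_up():
	 * order the flag store before the wakeup itself
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* analogue of wake_up_state(wq->private, mode) */
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *sleeper(void *arg)
{
	/* analogue of the wait loop in handle_userfault() */
	pthread_mutex_lock(&lock);
	while (!atomic_load_explicit(&waken, memory_order_relaxed))
		pthread_cond_wait(&cond, &lock);	/* analogue of schedule() */
	pthread_mutex_unlock(&lock);
	printf("woken; waken flag was already visible\n");
	return NULL;
}

int main(void)
{
	pthread_t s, w;

	pthread_create(&s, NULL, sleeper, NULL);
	pthread_create(&w, NULL, waker, NULL);
	pthread_join(w, NULL);
	pthread_join(s, NULL);
	return 0;
}

The second half of the kernel comment, where the waitqueue entry may disappear as soon as list_del_init() is visible, has no analogue in this sketch; it only covers the visibility of the waken flag before the woken task runs.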