author     Jens Axboe <axboe@kernel.dk>  2020-02-02 18:23:03 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-02-11 15:34:08 +0300
commit     eaef83c4c0cb8c82ab7cea99479d49d35a5cd25d (patch)
tree       4f5837096454967295249bde5c311130ac6a4865 /fs/eventfd.c
parent     e608cd92bac3a37436666b75e7272af968ebbbc5 (diff)
download   linux-eaef83c4c0cb8c82ab7cea99479d49d35a5cd25d.tar.xz
eventfd: track eventfd_signal() recursion depth
commit b5e683d5cab8cd433b06ae178621f083cabd4f63 upstream.

eventfd use cases from aio and io_uring can deadlock due to circular or
recursive calling, when eventfd_signal() tries to grab the waitqueue lock.
On top of that, it's also possible to construct notification chains that
are deep enough that we could blow the stack.

Add a percpu counter that tracks the percpu recursion depth, and warn if
we exceed it. The counter is also exposed so that users of eventfd_signal()
can do the right thing if it's non-zero in the context where it is called.

Cc: stable@vger.kernel.org # 4.19+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
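The "exposed" counter mentioned above is read through a small helper that the upstream commit adds to include/linux/eventfd.h; that part of the patch is not visible here because this view is limited to fs/eventfd.c. A sketch of the header side, based on the upstream change:

/* include/linux/eventfd.h -- sketch of the upstream helper, not part of the diff below */
DECLARE_PER_CPU(int, eventfd_wake_count);

static inline bool eventfd_signal_count(void)
{
        return this_cpu_read(eventfd_wake_count);
}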
Diffstat (limited to 'fs/eventfd.c')
-rw-r--r--  fs/eventfd.c | 15
1 file changed, 15 insertions, 0 deletions
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 08d3bd602f73..ce1d1711fbba 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -22,6 +22,8 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 struct eventfd_ctx {
         struct kref kref;
         wait_queue_head_t wqh;
@@ -55,12 +57,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
         unsigned long flags;
 
+        /*
+         * Deadlock or stack overflow issues can happen if we recurse here
+         * through waitqueue wakeup handlers. If the caller uses potentially
+         * nested waitqueues with custom wakeup handlers, then it should
+         * check eventfd_signal_count() before calling this function. If
+         * it returns true, the eventfd_signal() call should be deferred to a
+         * safe context.
+         */
+        if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+                return 0;
+
         spin_lock_irqsave(&ctx->wqh.lock, flags);
+        this_cpu_inc(eventfd_wake_count);
         if (ULLONG_MAX - ctx->count < n)
                 n = ULLONG_MAX - ctx->count;
         ctx->count += n;
         if (waitqueue_active(&ctx->wqh))
                 wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+        this_cpu_dec(eventfd_wake_count);
         spin_unlock_irqrestore(&ctx->wqh.lock, flags);
         return n;
 }
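As the new comment in eventfd_signal() suggests, a caller that might already be running from a waitqueue wakeup handler should check eventfd_signal_count() and defer the signal to a safe context. A minimal, hypothetical sketch of that caller pattern (my_ctx, my_post_event and signal_work are illustrative names, not taken from this commit or from the aio/io_uring code):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/eventfd.h>

/* Hypothetical eventfd_signal() user; all names here are illustrative. */
struct my_ctx {
        struct eventfd_ctx *evfd;
        struct work_struct signal_work; /* INIT_WORK(..., my_signal_work) at setup */
};

static void my_signal_work(struct work_struct *work)
{
        struct my_ctx *mc = container_of(work, struct my_ctx, signal_work);

        /* Process context: no risk of recursing into a wakeup handler. */
        eventfd_signal(mc->evfd, 1);
}

static void my_post_event(struct my_ctx *mc)
{
        if (eventfd_signal_count())
                /* Already inside an eventfd wakeup on this CPU: defer. */
                schedule_work(&mc->signal_work);
        else
                eventfd_signal(mc->evfd, 1);
}

A work item is just one possible safe context; the point of the counter is only that the signal must not be raised again from inside the wakeup path itself.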