| author | Al Viro <viro@zeniv.linux.org.uk> | 2020-09-10 05:25:06 +0300 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2020-10-14 10:51:09 +0300 |
| commit | 23fb662b13e4f75688123e1d16aa7116f602db32 (patch) | |
| tree | f7c0e3c259666deec6d503ebd2fc78dfbb933bb8 /fs/eventpoll.c | |
| parent | 5b08356f184a0314d87f9a889be2ed9fef087691 (diff) | |
| download | linux-23fb662b13e4f75688123e1d16aa7116f602db32.tar.xz | |
epoll: do not insert into poll queues until all sanity checks are done
commit f8d4f44df056c5b504b0d49683fb7279218fd207 upstream.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 37 |
1 file changed, 18 insertions, 19 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index af9dfa494b1f..a32df9cad519 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1461,6 +1461,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
 
+	/* Add the current item to the list of active epoll hook for this file */
+	spin_lock(&tfile->f_lock);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_lock);
+
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
+	ep_rbtree_insert(ep, epi);
+
+	/* now check if we've created too many backpaths */
+	error = -EINVAL;
+	if (full_check && reverse_path_check())
+		goto error_remove_epi;
+
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
 	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1483,22 +1499,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	if (epi->nwait < 0)
 		goto error_unregister;
 
-	/* Add the current item to the list of active epoll hook for this file */
-	spin_lock(&tfile->f_lock);
-	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-	spin_unlock(&tfile->f_lock);
-
-	/*
-	 * Add the current item to the RB tree. All RB tree operations are
-	 * protected by "mtx", and ep_insert() is called with "mtx" held.
-	 */
-	ep_rbtree_insert(ep, epi);
-
-	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
-
 	/* We have to drop the new item inside our item list to keep track of it */
 	spin_lock_irqsave(&ep->lock, flags);
 
@@ -1527,6 +1527,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	return 0;
 
+error_unregister:
+	ep_unregister_pollwait(ep, epi);
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
 	list_del_rcu(&epi->fllink);
@@ -1534,9 +1536,6 @@ error_remove_epi:
 
 	rb_erase_cached(&epi->rbn, &ep->rbr);
 
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-
 	/*
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue. Note that we don't care about the ep->ovflist
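For context on when the moved check actually fires: reverse_path_check() only runs under full_check, which epoll_ctl() sets for the more involved EPOLL_CTL_ADD cases, notably when epoll instances are nested. The sketch below is a hypothetical userspace illustration of such a nested add; it is not part of the patch, and the descriptor names and error handling are assumptions made for the example only.

```c
/*
 * Illustration only: nest one epoll instance inside another, the kind of
 * EPOLL_CTL_ADD that makes the kernel take the full_check path and run
 * reverse_path_check() in ep_insert().
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int outer = epoll_create1(0);
	int inner = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (outer < 0 || inner < 0) {
		perror("epoll_create1");
		return 1;
	}

	/*
	 * With the patched ordering, ep_insert() links "inner" into the
	 * rbtree and f_ep_links and runs the backpath check before it
	 * queues any poll callbacks on it; a failed check surfaces here
	 * as EINVAL from epoll_ctl().
	 */
	ev.data.fd = inner;
	if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) < 0) {
		perror("epoll_ctl(EPOLL_CTL_ADD)");
		return 1;
	}

	close(inner);
	close(outer);
	return 0;
}
```

Either way, the net effect matches the subject line: the poll table callback (ep_ptable_queue_proc) is only wired up after the f_ep_links/rbtree bookkeeping and the backpath check have succeeded, and the error labels are reordered so that error_unregister now falls through into error_remove_epi to undo both.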