author     Al Viro <viro@zeniv.linux.org.uk>    2020-08-31 20:06:51 +0300
committer  Al Viro <viro@zeniv.linux.org.uk>    2020-10-26 03:01:57 +0300
commit     db502f8a3b0bb5188f92d9d6a68aed223892689b (patch)
tree       ac3cc7f5af4044ce33ae69c05e8a1f3bc76c8d3a /fs/eventpoll.c
parent     bde03c4c1a6b3b679a63aa8f275ac12ffdd58c65 (diff)
download   linux-db502f8a3b0bb5188f92d9d6a68aed223892689b.tar.xz
ep_scan_ready_list(): prepare to splitup
take the stuff done before and after the callback into separate helpers
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
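For context, "the callback" in the commit message is the @sproc function pointer whose signature appears in the diff below: __poll_t (*)(struct eventpoll *, struct list_head *, void *). The following is only a minimal sketch of the shape such a callback takes; the function name and body are hypothetical, not part of this patch, and merely illustrate the pattern that the in-tree callbacks (e.g. ep_read_events_proc(), ep_send_events_proc()) follow.

static __poll_t example_scan_proc(struct eventpoll *ep,
				  struct list_head *head, void *priv)
{
	struct epitem *epi, *tmp;

	/* walk the ready items that were handed over on the txlist */
	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		/* examine or consume each ready item here */
	}
	return 0;
}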
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--  fs/eventpoll.c  63
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index e971e3ace557..eb012fdc152e 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -561,28 +561,10 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
 	rcu_read_unlock();
 }
 
-/**
- * ep_scan_ready_list - Scans the ready list in a way that makes possible for
- *                      the scan code, to call f_op->poll(). Also allows for
- *                      O(NumReady) performance.
- *
- * @ep: Pointer to the epoll private data structure.
- * @sproc: Pointer to the scan callback.
- * @priv: Private opaque data passed to the @sproc callback.
- * @depth: The current depth of recursive f_op->poll calls.
- * @ep_locked: caller already holds ep->mtx
- *
- * Returns: The same integer error code returned by the @sproc callback.
- */
-static __poll_t ep_scan_ready_list(struct eventpoll *ep,
-			      __poll_t (*sproc)(struct eventpoll *,
-					   struct list_head *, void *),
-			      void *priv, int depth, bool ep_locked)
+static void ep_start_scan(struct eventpoll *ep,
+			  int depth, bool ep_locked,
+			  struct list_head *txlist)
 {
-	__poll_t res;
-	struct epitem *epi, *nepi;
-	LIST_HEAD(txlist);
-
 	lockdep_assert_irqs_enabled();
 
 	/*
@@ -602,14 +584,16 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	 * in a lockless way.
 	 */
 	write_lock_irq(&ep->lock);
-	list_splice_init(&ep->rdllist, &txlist);
+	list_splice_init(&ep->rdllist, txlist);
 	WRITE_ONCE(ep->ovflist, NULL);
 	write_unlock_irq(&ep->lock);
+}
 
-	/*
-	 * Now call the callback function.
-	 */
-	res = (*sproc)(ep, &txlist, priv);
+static void ep_done_scan(struct eventpoll *ep,
+			 int depth, bool ep_locked,
+			 struct list_head *txlist)
+{
+	struct epitem *epi, *nepi;
 
 	write_lock_irq(&ep->lock);
 	/*
@@ -644,13 +628,38 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	/*
 	 * Quickly re-inject items left on "txlist".
 	 */
-	list_splice(&txlist, &ep->rdllist);
+	list_splice(txlist, &ep->rdllist);
 	__pm_relax(ep->ws);
 
 	write_unlock_irq(&ep->lock);
 
 	if (!ep_locked)
 		mutex_unlock(&ep->mtx);
+}
+/**
+ * ep_scan_ready_list - Scans the ready list in a way that makes possible for
+ *                      the scan code, to call f_op->poll(). Also allows for
+ *                      O(NumReady) performance.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @sproc: Pointer to the scan callback.
+ * @priv: Private opaque data passed to the @sproc callback.
+ * @depth: The current depth of recursive f_op->poll calls.
+ * @ep_locked: caller already holds ep->mtx
+ *
+ * Returns: The same integer error code returned by the @sproc callback.
+ */
+static __poll_t ep_scan_ready_list(struct eventpoll *ep,
+			      __poll_t (*sproc)(struct eventpoll *,
+					   struct list_head *, void *),
+			      void *priv, int depth, bool ep_locked)
+{
+	__poll_t res;
+	LIST_HEAD(txlist);
+
+	ep_start_scan(ep, depth, ep_locked, &txlist);
+	res = (*sproc)(ep, &txlist, priv);
+	ep_done_scan(ep, depth, ep_locked, &txlist);
 
 	return res;
 }
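The rewritten wrapper at the bottom of the diff shows the intended calling pattern: ep_start_scan() hands the ready items over on a caller-supplied txlist, the callback runs, and ep_done_scan() reinstates the overflow list and re-injects whatever is left on txlist. Once the split-up this patch prepares is complete, a caller can bracket its own code with the two helpers instead of funnelling it through a callback. The sketch below is illustrative only: the function name is hypothetical, and it assumes the code sits in fs/eventpoll.c next to the helpers.

static __poll_t example_direct_scan(struct eventpoll *ep, int depth,
				    bool ep_locked)
{
	__poll_t res = 0;
	LIST_HEAD(txlist);

	ep_start_scan(ep, depth, ep_locked, &txlist);
	/* work on txlist directly here, no sproc indirection needed */
	ep_done_scan(ep, depth, ep_locked, &txlist);

	return res;
}

That is the point of the preparation: with the before/after work factored out, later patches can drop the callback indirection entirely while keeping the locking and ovflist handling in one place.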