| author | Al Viro <viro@zeniv.linux.org.uk> | 2016-01-23 21:04:19 +0300 |
| --- | --- | --- |
| committer | Mike Marshall <hubcap@omnibond.com> | 2016-01-23 23:15:09 +0300 |
| commit | 70c6ea26ff2d2df420d573f8f0f22853336c0b56 (patch) | |
| tree | ede1fdddf167b3bf753fd46d92780e9558ed0582 /fs/orangefs/waitqueue.c | |
| parent | e1056a9cc35c878b6615d0fc84d3f338c89a38fa (diff) | |
| download | linux-70c6ea26ff2d2df420d573f8f0f22853336c0b56.tar.xz | |
orangefs: reduce nesting in wait_for_matching_downcall()
reorder if branches...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Mike Marshall <hubcap@omnibond.com>
Diffstat (limited to 'fs/orangefs/waitqueue.c')
-rw-r--r-- | fs/orangefs/waitqueue.c | 118
1 file changed, 58 insertions, 60 deletions
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index b8a2fcbcce64..8c07a070e2b6 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -376,79 +376,77 @@ static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 		}
 		spin_unlock(&op->lock);
-		if (!signal_pending(current)) {
-			/*
-			 * if this was our first attempt and client-core
-			 * has not purged our operation, we are happy to
-			 * simply wait
-			 */
-			spin_lock(&op->lock);
-			if (op->attempts == 0 && !op_state_purged(op)) {
-				spin_unlock(&op->lock);
-				schedule();
-			} else {
-				spin_unlock(&op->lock);
-				/*
-				 * subsequent attempts, we retry exactly once
-				 * with timeouts
-				 */
-				if (!schedule_timeout(MSECS_TO_JIFFIES
-				      (1000 * op_timeout_secs))) {
-					gossip_debug(GOSSIP_WAIT_DEBUG,
-						     "*** %s:"
-						     " operation timed out (tag"
-						     " %llu, %p, att %d)\n",
-						     __func__,
-						     llu(op->tag),
-						     op,
-						     op->attempts);
-					ret = -ETIMEDOUT;
-					orangefs_clean_up_interrupted_operation
-					    (op);
-					break;
-				}
-			}
-			spin_lock(&op->lock);
-			op->attempts++;
+		if (unlikely(signal_pending(current))) {
+			gossip_debug(GOSSIP_WAIT_DEBUG,
+				     "*** %s:"
+				     " operation interrupted by a signal (tag "
+				     "%llu, op %p)\n",
+				     __func__,
+				     llu(op->tag),
+				     op);
+			orangefs_clean_up_interrupted_operation(op);
+			ret = -EINTR;
+			break;
+		}
+
+		/*
+		 * if this was our first attempt and client-core
+		 * has not purged our operation, we are happy to
+		 * simply wait
+		 */
+		spin_lock(&op->lock);
+		if (op->attempts == 0 && !op_state_purged(op)) {
+			spin_unlock(&op->lock);
+			schedule();
+		} else {
+			spin_unlock(&op->lock);
 			/*
-			 * if the operation was purged in the meantime, it
-			 * is better to requeue it afresh but ensure that
-			 * we have not been purged repeatedly. This could
-			 * happen if client-core crashes when an op
-			 * is being serviced, so we requeue the op, client
-			 * core crashes again so we requeue the op, client
-			 * core starts, and so on...
+			 * subsequent attempts, we retry exactly once
+			 * with timeouts
 			 */
-			if (op_state_purged(op)) {
-				ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
-					 -EAGAIN :
-					 -EIO;
-				spin_unlock(&op->lock);
+			if (!schedule_timeout(MSECS_TO_JIFFIES
+			      (1000 * op_timeout_secs))) {
 				gossip_debug(GOSSIP_WAIT_DEBUG,
 					     "*** %s:"
-					     " operation purged (tag "
-					     "%llu, %p, att %d)\n",
+					     " operation timed out (tag"
+					     " %llu, %p, att %d)\n",
 					     __func__,
 					     llu(op->tag),
 					     op,
 					     op->attempts);
+				ret = -ETIMEDOUT;
 				orangefs_clean_up_interrupted_operation(op);
 				break;
 			}
+		}
+		spin_lock(&op->lock);
+		op->attempts++;
+		/*
+		 * if the operation was purged in the meantime, it
+		 * is better to requeue it afresh but ensure that
+		 * we have not been purged repeatedly. This could
+		 * happen if client-core crashes when an op
+		 * is being serviced, so we requeue the op, client
+		 * core crashes again so we requeue the op, client
+		 * core starts, and so on...
+		 */
+		if (op_state_purged(op)) {
+			ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
+				 -EAGAIN :
+				 -EIO;
 			spin_unlock(&op->lock);
-			continue;
+			gossip_debug(GOSSIP_WAIT_DEBUG,
+				     "*** %s:"
+				     " operation purged (tag "
+				     "%llu, %p, att %d)\n",
+				     __func__,
+				     llu(op->tag),
+				     op,
+				     op->attempts);
+			orangefs_clean_up_interrupted_operation(op);
+			break;
 		}
-
-		gossip_debug(GOSSIP_WAIT_DEBUG,
-			     "*** %s:"
-			     " operation interrupted by a signal (tag "
-			     "%llu, op %p)\n",
-			     __func__,
-			     llu(op->tag),
-			     op);
-		orangefs_clean_up_interrupted_operation(op);
-		ret = -EINTR;
-		break;
+		spin_unlock(&op->lock);
 	}

 	spin_lock(&op->lock);
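For context on the diff above: the nested `if (!signal_pending(current)) { ... }` block becomes an early exit (`if (unlikely(signal_pending(current))) { ...; break; }`), so the wait/retry logic that used to sit inside that branch now runs at the top level of the loop body, one indentation level shallower. A minimal standalone sketch of that guard-clause restructuring, using hypothetical names rather than the kernel code, looks like this:

```c
/* Guard-clause sketch of the reordering in the commit above.
 * All names here (signal_flag, handle_interrupt, wait_step) are
 * hypothetical illustrations, not the orangefs code. */
#include <stdbool.h>
#include <stdio.h>

static bool signal_flag;           /* stands in for signal_pending(current) */

static void handle_interrupt(void) /* stands in for the -EINTR cleanup path */
{
	puts("interrupted: clean up and bail out");
}

static int wait_step(void)
{
	/*
	 * Before the reordering the body read roughly as:
	 *
	 *	if (!signal_flag) {
	 *		... wait/retry logic, one level deeper ...
	 *		return 0;
	 *	}
	 *	handle_interrupt();
	 *	return -1;
	 *
	 * After it, the rare case exits early and the common path stays flat:
	 */
	if (signal_flag) {
		handle_interrupt();
		return -1;
	}

	puts("no signal: wait/retry logic runs un-nested");
	return 0;
}

int main(void)
{
	wait_step();          /* common path */
	signal_flag = true;
	wait_step();          /* early-exit path */
	return 0;
}
```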