author     Oleg Nesterov <oleg@redhat.com>  2011-04-27 23:56:14 +0400
committer  Oleg Nesterov <oleg@redhat.com>  2011-04-28 15:01:39 +0400
commit     b182801ab35f7a0afb3cdf8ba5df464d04206b46 (patch)
tree       dcdda5843c6453b60fe4adcf004283ac5f8e315b /kernel/signal.c
parent     943df1485a8ff0e600729e082e568ece04d4de9e (diff)
download   linux-b182801ab35f7a0afb3cdf8ba5df464d04206b46.tar.xz
signal: do_sigtimedwait() needs retarget_shared_pending()
do_sigtimedwait() changes current->blocked and thus it needs
set_current_blocked()->retarget_shared_pending().

We could use set_current_blocked() directly. It is fine to change
->real_blocked from all-zeroes to ->blocked and vice versa locklessly,
but this is not immediately clear, looks racy, and would need a huge
comment to explain why it is correct.

To keep things simple, this patch adds a new static helper,
__set_task_blocked(), which should be called with ->siglock held. This
way we can change both ->real_blocked and ->blocked atomically under
->siglock, as the current code does. This is easier to understand.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Matt Fleming <matt.fleming@linux.intel.com>
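To make the userspace-visible behavior concrete, here is a small, purely illustrative example (not part of the patch) of the syscall this kernel path serves: the caller blocks a signal, then waits for it with sigtimedwait(2); while the call sleeps, do_sigtimedwait() temporarily unblocks the waited-for set and later restores the caller's mask, which is the ->blocked transition the patch routes through the new helper.

/*
 * Illustrative userspace use of sigtimedwait(2); not from the patch.
 * Build: cc example.c
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	/* Block SIGUSR1 so it stays pending instead of running a handler. */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/*
	 * While this call sleeps, the kernel temporarily unblocks SIGUSR1,
	 * then restores the blocked mask before returning -- the restore
	 * that now goes through __set_task_blocked() in the patch above.
	 */
	if (sigtimedwait(&set, &info, &timeout) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");

	return 0;
}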
Diffstat (limited to 'kernel/signal.c')
 kernel/signal.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 1ab89f677424..4d97e11d7672 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2299,6 +2299,18 @@ long do_no_restart_syscall(struct restart_block *param)
 	return -EINTR;
 }
 
+static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
+{
+	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
+		sigset_t newblocked;
+		/* A set of now blocked but previously unblocked signals. */
+		signandsets(&newblocked, newset, &current->blocked);
+		retarget_shared_pending(tsk, &newblocked);
+	}
+	tsk->blocked = *newset;
+	recalc_sigpending();
+}
+
 /**
  * set_current_blocked - change current->blocked mask
  * @newset: new mask
@@ -2311,14 +2323,7 @@ void set_current_blocked(const sigset_t *newset)
 	struct task_struct *tsk = current;
 
 	spin_lock_irq(&tsk->sighand->siglock);
-	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
-		sigset_t newblocked;
-		/* A set of now blocked but previously unblocked signals. */
-		signandsets(&newblocked, newset, &current->blocked);
-		retarget_shared_pending(tsk, &newblocked);
-	}
-	tsk->blocked = *newset;
-	recalc_sigpending();
+	__set_task_blocked(tsk, newset);
 	spin_unlock_irq(&tsk->sighand->siglock);
 }
@@ -2541,7 +2546,8 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
-		 * they arrive.
+		 * they arrive. Unblocking is always fine, we can avoid
+		 * set_current_blocked().
 		 */
 		tsk->real_blocked = tsk->blocked;
 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
@@ -2551,10 +2557,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		timeout = schedule_timeout_interruptible(timeout);
 
 		spin_lock_irq(&tsk->sighand->siglock);
-		sig = dequeue_signal(tsk, &mask, info);
-		tsk->blocked = tsk->real_blocked;
+		__set_task_blocked(tsk, &tsk->real_blocked);
 		siginitset(&tsk->real_blocked, 0);
-		recalc_sigpending();
+		sig = dequeue_signal(tsk, &mask, info);
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
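The retargeting above matters because threads in one group can have different blocked masks while process-directed signals sit on the shared pending queue. A common userspace pattern that produces exactly that situation, sketched here purely for illustration (not from the patch), is a dedicated signal-collecting thread: every thread blocks the interesting signals with pthread_sigmask(), and one thread drains them with sigtimedwait().

/*
 * Illustrative userspace pattern (not from the patch): a dedicated
 * signal-collecting thread.  All threads block SIGUSR1/SIGTERM so the
 * signals queue on the process-wide (shared) pending set; one thread
 * drains them via sigtimedwait(), i.e. the do_sigtimedwait() path
 * changed above.  Build: cc -pthread example.c
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static sigset_t watched;

static void *signal_thread(void *arg)
{
	siginfo_t info;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	(void)arg;
	for (;;) {
		/* Timeouts return -1/EAGAIN; just retry until SIGTERM. */
		int sig = sigtimedwait(&watched, &info, &ts);
		if (sig > 0)
			printf("signal %d handled by collector thread\n", sig);
		if (sig == SIGTERM)
			break;
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	sigemptyset(&watched);
	sigaddset(&watched, SIGUSR1);
	sigaddset(&watched, SIGTERM);

	/* Block in main before creating threads; children inherit the mask. */
	pthread_sigmask(SIG_BLOCK, &watched, NULL);
	pthread_create(&tid, NULL, signal_thread, NULL);

	/* ... the rest of the program runs with these signals blocked ... */

	pthread_join(tid, NULL);
	return 0;
}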