author		Eric W. Biederman <ebiederm@xmission.com>	2018-07-23 21:38:00 +0300
committer	Eric W. Biederman <ebiederm@xmission.com>	2018-08-04 04:20:14 +0300
commit		924de3b8c9410c404c6eda7abffd282b97b3ff7f (patch)
tree		6bcc1ed1e1a9268ad6ce8c899d9e728e769277fb
parent		4390e9eadbbb6774b7ba03fde0a0fdf3f07db4cd (diff)
download	linux-924de3b8c9410c404c6eda7abffd282b97b3ff7f.tar.xz
fork: Have new threads join on-going signal group stops
There are only two signals that are delivered to every member of a
signal group: SIGSTOP and SIGKILL. Signal delivery requires that every
signal appear to be delivered either before or after a clone syscall.
SIGKILL terminates the clone, so it does not need to be considered.
That leaves only SIGSTOP to be considered when creating new threads.
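For illustration only (not part of this patch): a hypothetical
userspace sketch of the invariant above. The child spawns threads
continuously so that some clone() calls race with the group stop;
after the SIGSTOP, every thread, including any created concurrently
with delivery, must end up stopped (visible as "T (stopped)" in
/proc/<pid>/status). Build with "gcc -pthread demo.c".

	#include <pthread.h>
	#include <signal.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void *spin(void *arg)
	{
		for (;;)
			pause();	/* park the thread */
		return NULL;
	}

	int main(void)
	{
		pid_t child = fork();

		if (child == 0) {
			/* Child: keep creating threads so some clone()
			 * calls overlap the group stop below. */
			for (;;) {
				pthread_t t;
				if (pthread_create(&t, NULL, spin, NULL) == 0)
					pthread_detach(t);
				usleep(1000);
			}
		}

		sleep(1);		/* let some threads start */
		kill(child, SIGSTOP);	/* group stop: all threads stop */
		sleep(1);		/* inspect /proc/<pid>/status here */
		kill(child, SIGKILL);	/* clean up */
		waitpid(child, NULL, 0);
		return 0;
	}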
Today, in the event of a group stop, TIF_SIGPENDING will get set and
the fork will restart, ensuring that the fork syscall participates in
the group stop.
A fork (especially of a process with a lot of memory) is one of the
most expensive system calls, so we really only want to restart a fork
when necessary.
It is easy to check whether a SIGSTOP is ongoing and have the new
thread join it immediately after the clone completes, making it appear
that the clone completed just before the SIGSTOP.
The calculate_sigpending function will see the bits set in jobctl and
set TIF_SIGPENDING to ensure the new task takes the slow path to userspace.
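For context: calculate_sigpending() was added earlier in this series.
A sketch of the mechanism (paraphrased from that patch, not verbatim):
the new task runs it on its first trip toward userspace, and
recalc_sigpending() keeps TIF_SIGPENDING set because it sees the
JOBCTL_STOP_PENDING bit that task_join_group_stop() left in the new
task's jobctl.

	void calculate_sigpending(void)
	{
		/* Have any signals or users of TIF_SIGPENDING been
		 * delayed until after fork?
		 */
		spin_lock_irq(&current->sighand->siglock);
		set_tsk_thread_flag(current, TIF_SIGPENDING);
		recalc_sigpending();	/* jobctl stop bits keep the flag set */
		spin_unlock_irq(&current->sighand->siglock);
	}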
V2: The call to task_join_group_stop was moved before the new task is
added to the thread group list. This should not matter, as
sighand->siglock is held over the addition of the threads, the call
to task_join_group_stop, and do_signal_stop. But the change is
trivial, and it is one less thing to worry about when reading the
code.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
-rw-r--r--	include/linux/sched/signal.h	 2
-rw-r--r--	kernel/fork.c			27
-rw-r--r--	kernel/signal.c			14
3 files changed, 31 insertions, 12 deletions
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index b55fd293c1e5..ae2b0b81be25 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -385,6 +385,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 }
 
+void task_join_group_stop(struct task_struct *task);
+
 #ifdef TIF_RESTORE_SIGMASK
 /*
  * Legacy restore_sigmask accessors.  These are inefficient on
diff --git a/kernel/fork.c b/kernel/fork.c
index 22d4cdb9a7ca..ab731e15a600 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1934,18 +1934,20 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
-	/*
-	 * Process group and session signals need to be delivered to just the
-	 * parent before the fork or both the parent and the child after the
-	 * fork. Restart if a signal comes in before we add the new process to
-	 * it's process group.
-	 * A fatal signal pending means that current will exit, so the new
-	 * thread can't slip out of an OOM kill (or normal SIGKILL).
-	 */
-	recalc_sigpending();
-	if (signal_pending(current)) {
-		retval = -ERESTARTNOINTR;
-		goto bad_fork_cancel_cgroup;
+	if (!(clone_flags & CLONE_THREAD)) {
+		/*
+		 * Process group and session signals need to be delivered to just the
+		 * parent before the fork or both the parent and the child after the
+		 * fork. Restart if a signal comes in before we add the new process to
+		 * it's process group.
+		 * A fatal signal pending means that current will exit, so the new
+		 * thread can't slip out of an OOM kill (or normal SIGKILL).
+		 */
+		recalc_sigpending();
+		if (signal_pending(current)) {
+			retval = -ERESTARTNOINTR;
+			goto bad_fork_cancel_cgroup;
+		}
 	}
 
@@ -1982,6 +1984,7 @@ static __latent_entropy struct task_struct *copy_process(
 			current->signal->nr_threads++;
 			atomic_inc(&current->signal->live);
 			atomic_inc(&current->signal->sigcnt);
+			task_join_group_stop(p);
 			list_add_tail_rcu(&p->thread_group,
 					  &p->group_leader->thread_group);
 			list_add_tail_rcu(&p->thread_node,
diff --git a/kernel/signal.c b/kernel/signal.c
index 1e06f1eba363..9f0eafb6d474 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -373,6 +373,20 @@ static bool task_participate_group_stop(struct task_struct *task)
 	return false;
 }
 
+void task_join_group_stop(struct task_struct *task)
+{
+	/* Have the new thread join an on-going signal group stop */
+	unsigned long jobctl = current->jobctl;
+	if (jobctl & JOBCTL_STOP_PENDING) {
+		struct signal_struct *sig = current->signal;
+		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+		if (task_set_jobctl_pending(task, signr | gstop)) {
+			sig->group_stop_count++;
+		}
+	}
+}
+
 /*
  * allocate a new signal queue record
  *   - this may be called without locks if and only if t == current, otherwise an
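The consuming side of this handshake is task_participate_group_stop(),
visible in the context lines above. A simplified sketch (paraphrased
from kernel/signal.c of this era, not verbatim): when the new thread
takes the signal slow path, do_signal_stop() calls it with siglock
held; it clears the jobctl bits that task_join_group_stop() set and
decrements the group_stop_count that was incremented, so the group
stop completes with the new thread properly accounted for.

	static bool task_participate_group_stop(struct task_struct *task)
	{
		struct signal_struct *sig = task->signal;
		bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

		task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

		if (!consume)
			return false;

		if (sig->group_stop_count > 0)
			sig->group_stop_count--;

		/* Notify the parent only when this consumption completes
		 * a fresh group stop. */
		if (!sig->group_stop_count &&
		    !(sig->flags & SIGNAL_STOP_STOPPED)) {
			sig->flags |= SIGNAL_STOP_STOPPED;
			return true;
		}
		return false;
	}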