| author    | Nicholas Piggin <npiggin@gmail.com>                   | 2023-03-25 15:29:01 +0300 |
| committer | Michael Ellerman <mpe@ellerman.id.au>                 | 2023-04-11 16:13:33 +0300 |
| commit    | eed7c420aac7fde5e5915d2747c3ebbbda225835 (patch)      | |
| tree      | 8e29bf90b55ef02b7a06dcad15135363210ae863              | |
| parent    | 5088a6246bd3dcfea504376f356683f750136f7f (diff)       | |
| download  | linux-eed7c420aac7fde5e5915d2747c3ebbbda225835.tar.xz | |
powerpc: copy_thread differentiate kthreads and user mode threads
When copy_thread is given a kernel function to run in args->fn, this
does not necessarily mean it is a kernel thread. User threads can be
created this way (e.g., kernel_init; see also x86's copy_thread()).
These threads run a kernel function which may call kernel_execve() and
then return; that return behaves like the return from a userspace
exec(2) syscall.
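
For context, the kernel_init case mentioned above looks roughly like the
sketch below, simplified from init/main.c and not part of this patch:
user_mode_thread() hands a kernel function to copy_thread via args->fn,
yet the resulting task is a user thread, because the function ends by
exec'ing init and returning to user mode.

```c
/*
 * Simplified sketch of the kernel_init pattern (init/main.c);
 * details trimmed, not part of this patch.
 */
static int kernel_init(void *unused)
{
	static const char *argv[] = { "/sbin/init", NULL };
	static const char *envp[] = { NULL };

	/* ... boot-time initialisation runs in kernel mode ... */

	/*
	 * On success this task returns to user mode running /sbin/init,
	 * as if it had called exec(2) itself.
	 */
	if (!kernel_execve("/sbin/init", argv, envp))
		return 0;

	panic("No working init found.");
}

static noinline void __ref rest_init(void)
{
	/* A user mode thread: args->fn is set, but PF_KTHREAD is not. */
	user_mode_thread(kernel_init, NULL, CLONE_FS);
	/* ... */
}
```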
Kernel threads are differentiated by PF_KTHREAD. They always have
args->fn set and should never return from that function; they call
kthread_exit() to exit instead.
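
The kthread side of that contract, sketched below for contrast (again
not part of this patch; the worker function is hypothetical). In the
real kernel the args->fn of a PF_KTHREAD task is the kthread()
bootstrap in kernel/kthread.c, which likewise terminates via
kthread_exit() rather than returning.

```c
/*
 * Hypothetical sketch of a function run as args->fn of a PF_KTHREAD
 * task: it must never return; it ends the thread with kthread_exit().
 */
static int my_kthread_fn(void *data)
{
	long ret;

	ret = do_kthread_work(data);	/* hypothetical worker */

	kthread_exit(ret);		/* __noreturn: the function never returns */
}
```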
Create separate paths for the kthread and user kernel thread creation
logic. The kthread path never returns from its function and does not
require a user interrupt frame, so it gets only a minimal stack frame.
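
Condensed, the resulting copy_thread() flow is the following outline,
distilled from the diff below with register setup and error handling
omitted:

```c
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: no user register state, minimal switch frame. */
		sp -= STACK_FRAME_MIN_SIZE;
		f = ret_from_kernel_thread;
		p->thread.regs = NULL;
	} else {
		/* User thread: needs a full user interrupt (pt_regs) frame. */
		sp -= STACK_USER_INT_FRAME_SIZE;
		childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);

		if (unlikely(args->fn))
			/* Kernel function that will exec and return to user mode. */
			f = ret_from_kernel_thread;
		else
			/* Ordinary fork/clone of the current user context. */
			f = trap_is_scv(current_pt_regs()) ? ret_from_fork_scv
							   : ret_from_fork;

		p->thread.regs = childregs;
	}
```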
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230325122904.2375060-6-npiggin@gmail.com
-rw-r--r-- | arch/powerpc/include/asm/thread_info.h |  2 |
-rw-r--r-- | arch/powerpc/kernel/process.c          | 98 |
2 files changed, 64 insertions, 36 deletions
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index cf13d84c4eb9..bf5dde1a4114 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -176,9 +176,11 @@ static inline bool test_thread_local_flags(unsigned int flags)
 #ifdef CONFIG_COMPAT
 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
 #define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
+#define clear_tsk_compat_task(tsk) (clear_tsk_thread_flag(p, TIF_32BIT))
 #else
 #define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
 #define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
+#define clear_tsk_compat_task(tsk) do { } while (0)
 #endif
 
 #if defined(CONFIG_PPC64)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3b34bd9a6dff..88898ca7ab0b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1738,10 +1738,7 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
  */
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
-	unsigned long clone_flags = args->flags;
-	unsigned long usp = args->stack;
-	unsigned long tls = args->tls;
-	struct pt_regs *childregs, *kregs;
+	struct pt_regs *kregs; /* Switch frame regs */
 	extern void ret_from_fork(void);
 	extern void ret_from_fork_scv(void);
 	extern void ret_from_kernel_thread(void);
@@ -1754,45 +1751,76 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 	klp_init_thread_info(p);
 
-	/* Create initial stack frame. */
-	sp -= STACK_USER_INT_FRAME_SIZE;
-	*(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER;
-
-	/* Copy registers */
-	childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
-	if (unlikely(args->fn)) {
+	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
+
+		/* Create initial minimum stack frame. */
+		sp -= STACK_FRAME_MIN_SIZE;
 		((unsigned long *)sp)[0] = 0;
-		memset(childregs, 0, sizeof(struct pt_regs));
-		childregs->gpr[1] = sp + STACK_USER_INT_FRAME_SIZE;
-#ifdef CONFIG_PPC64
-		clear_tsk_thread_flag(p, TIF_32BIT);
-		childregs->softe = IRQS_ENABLED;
-#endif
-		p->thread.regs = NULL;	/* no user register state */
-		ti->flags |= _TIF_RESTOREALL;
+
 		f = ret_from_kernel_thread;
+		p->thread.regs = NULL;	/* no user register state */
+		clear_tsk_compat_task(p);
 	} else {
 		/* user thread */
-		struct pt_regs *regs = current_pt_regs();
-		*childregs = *regs;
-		if (usp)
-			childregs->gpr[1] = usp;
-		((unsigned long *)sp)[0] = childregs->gpr[1];
-		p->thread.regs = childregs;
-		if (clone_flags & CLONE_SETTLS) {
-			if (!is_32bit_task())
-				childregs->gpr[13] = tls;
+		struct pt_regs *childregs;
+
+		/* Create initial user return stack frame. */
+		sp -= STACK_USER_INT_FRAME_SIZE;
+		*(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER;
+
+		childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
+
+		if (unlikely(args->fn)) {
+			/*
+			 * A user space thread, but it first runs a kernel
+			 * thread, and then returns as though it had called
+			 * execve rather than fork, so user regs will be
+			 * filled in (e.g., by kernel_execve()).
+			 */
+			((unsigned long *)sp)[0] = 0;
+			memset(childregs, 0, sizeof(struct pt_regs));
+#ifdef CONFIG_PPC64
+			childregs->softe = IRQS_ENABLED;
+#endif
+			ti->flags |= _TIF_RESTOREALL;
+			f = ret_from_kernel_thread;
+		} else {
+			struct pt_regs *regs = current_pt_regs();
+			unsigned long clone_flags = args->flags;
+			unsigned long usp = args->stack;
+
+			/* Copy registers */
+			*childregs = *regs;
+			if (usp)
+				childregs->gpr[1] = usp;
+			((unsigned long *)sp)[0] = childregs->gpr[1];
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+			WARN_ON_ONCE(childregs->softe != IRQS_ENABLED);
+#endif
+			if (clone_flags & CLONE_SETTLS) {
+				unsigned long tls = args->tls;
+
+				if (!is_32bit_task())
+					childregs->gpr[13] = tls;
+				else
+					childregs->gpr[2] = tls;
+			}
+
+			if (trap_is_scv(regs))
+				f = ret_from_fork_scv;
 			else
-				childregs->gpr[2] = tls;
+				f = ret_from_fork;
 		}
-		if (trap_is_scv(regs))
-			f = ret_from_fork_scv;
-		else
-			f = ret_from_fork;
+
+#ifdef CONFIG_PPC64
+		if (cpu_has_feature(CPU_FTR_HAS_PPR))
+			childregs->ppr = DEFAULT_PPR;
+#endif
+
+		childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
+		p->thread.regs = childregs;
 	}
-	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
 
 	/*
 	 * The way this works is that at some point in the future
@@ -1843,8 +1871,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		p->thread.dscr_inherit = current->thread.dscr_inherit;
 		p->thread.dscr = mfspr(SPRN_DSCR);
 	}
-	if (cpu_has_feature(CPU_FTR_HAS_PPR))
-		childregs->ppr = DEFAULT_PPR;
 
 	p->thread.tidr = 0;
 #endif
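
For reference, the caller side that this distinction relies on lives in
generic code. A paraphrased sketch of kernel/fork.c around this kernel
version follows (abridged, not part of this patch): both helpers pass a
kernel function in args->fn, but only kernel_thread() sets the kthread
flag that copy_process() turns into PF_KTHREAD, which is why
copy_thread() now keys off p->flags rather than args->fn.

```c
/* Paraphrased from kernel/fork.c; abridged, not part of this patch. */
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
		    unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.name		= name,
		.kthread	= 1,	/* becomes PF_KTHREAD in copy_process() */
	};

	return kernel_clone(&args);
}

pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,	/* fn set, but no kthread flag */
		.fn_arg		= arg,
	};

	return kernel_clone(&args);
}
```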