author | Simon Guo <wei.guo.simon@gmail.com> | 2016-07-26 11:06:01 +0300
committer | Michael Ellerman <mpe@ellerman.id.au> | 2016-09-13 10:37:12 +0300
commit | e1c0d66fcb179a1737b3d5cc11c6e37bcabbd861 (patch)
tree | 6260230cda5bbe7a47f5d5955f7892f737fca6c8 /arch/powerpc/kernel/signal_64.c
parent | 261831160d4df6bafe2f0e12e6de9e5731519e06 (diff)
download | linux-e1c0d66fcb179a1737b3d5cc11c6e37bcabbd861.tar.xz
powerpc: Set used_(vsr|vr|spe) in sigreturn path when MSR bits are active
Normally, when the MSR[VSX/VR/SPE] bits are 1, the used_vsr/used_vr/used_spe
bits have already been set. However, when loading a signal frame from user
space we need to set used_vsr/used_vr/used_spe explicitly to keep them
consistent with the MSR bits taken from the signal frame.
For example, CRIU, which uses sigreturn to restore a checkpointed process,
can end up in a state where the MSR[VSX] bit is active in the signal frame
but the used_vsr bit is not set in the kernel (the same applies to VR/SPE).
This patch fixes the problem by always setting the used_* bits when the
corresponding MSR bits are active in the signal frame and we are doing
sigreturn.
Based on a proposal by Benh.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
[mpe: Massage change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
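The pattern the change-log describes is small: whenever the sigreturn path copies VMX/VSX (or SPE on 32-bit) state out of the user signal frame because the frame's saved MSR says that state is live, it must also set the matching used_* flag in the thread struct. Below is a minimal, self-contained C model of that rule for readers who want to see it in isolation; the struct and function names (toy_thread, toy_restore_sigcontext, etc.) are invented for illustration and the MSR bit values are stand-ins. The real logic lives in restore_sigcontext()/restore_tm_sigcontexts() in the diff further down.

/* Simplified model of the sigreturn fix. Names and bit values are
 * illustrative only; the real code is in arch/powerpc/kernel/signal_64.c. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSR_VEC (1UL << 25)    /* stand-in bit positions */
#define MSR_VSX (1UL << 23)

struct toy_thread {
        uint8_t vr_state[16];  /* stand-in for 33 * sizeof(vector128) */
        bool used_vr;
        bool used_vsr;
};

struct toy_sigframe {
        unsigned long msr;     /* MSR value saved in the signal frame */
        uint8_t vr_regs[16];   /* VMX registers saved by userspace (e.g. CRIU) */
};

/* Restore register state from a (toy) signal frame. The key point of the
 * patch: set used_vr/used_vsr whenever the frame's MSR bits say the state
 * is live, so the kernel keeps tracking that state after sigreturn. */
static void toy_restore_sigcontext(struct toy_thread *t,
                                   const struct toy_sigframe *frame)
{
        if (frame->msr & MSR_VEC) {
                memcpy(t->vr_state, frame->vr_regs, sizeof(t->vr_state));
                t->used_vr = true;      /* the line the patch adds */
        } else if (t->used_vr) {
                memset(t->vr_state, 0, sizeof(t->vr_state));
        }

        if (frame->msr & MSR_VSX)
                t->used_vsr = true;     /* same rule for VSX */
}

int main(void)
{
        struct toy_thread t = { .used_vr = false, .used_vsr = false };
        struct toy_sigframe frame = { .msr = MSR_VEC | MSR_VSX };

        toy_restore_sigcontext(&t, &frame);
        printf("used_vr=%d used_vsr=%d\n", t.used_vr, t.used_vsr);
        return 0;
}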
Diffstat (limited to 'arch/powerpc/kernel/signal_64.c')
-rw-r--r-- | arch/powerpc/kernel/signal_64.c | 11 |
1 file changed, 8 insertions, 3 deletions
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f08c9196f209..6faa8240b7c9 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -369,9 +369,11 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
-	if (v_regs != NULL && (msr & MSR_VEC) != 0)
+	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
 		err |= __copy_from_user(&current->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
+		current->thread.used_vr = true;
+	}
 	else if (current->thread.used_vr)
 		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
 	/* Always get VRSAVE back */
@@ -391,9 +393,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	 * buffer for formatting, then into the taskstruct.
 	 */
 	v_regs += ELF_NVRREG;
-	if ((msr & MSR_VSX) != 0)
+	if ((msr & MSR_VSX) != 0) {
 		err |= copy_vsx_from_user(current, v_regs);
-	else
+		current->thread.used_vsr = true;
+	} else
 		for (i = 0; i < 32 ; i++)
 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif
@@ -488,6 +491,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 					33 * sizeof(vector128));
 		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
 					33 * sizeof(vector128));
+		current->thread.used_vr = true;
 	}
 	else if (current->thread.used_vr) {
 		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
@@ -521,6 +525,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		tm_v_regs += ELF_NVRREG;
 		err |= copy_vsx_from_user(current, v_regs);
 		err |= copy_transact_vsx_from_user(current, tm_v_regs);
+		current->thread.used_vsr = true;
 	} else {
 		for (i = 0; i < 32 ; i++) {
 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;