author		Simon Guo <wei.guo.simon@gmail.com>	2016-07-26 11:06:01 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-09-13 10:37:12 +0300
commit		e1c0d66fcb179a1737b3d5cc11c6e37bcabbd861 (patch)
tree		6260230cda5bbe7a47f5d5955f7892f737fca6c8 /arch/powerpc/kernel/signal_32.c
parent		261831160d4df6bafe2f0e12e6de9e5731519e06 (diff)
download	linux-e1c0d66fcb179a1737b3d5cc11c6e37bcabbd861.tar.xz
powerpc: Set used_(vsr|vr|spe) in sigreturn path when MSR bits are active
Normally, when the MSR[VSX/VR/SPE] bits are 1, the corresponding used_vsr/used_vr/used_spe bits have already been set. However, when loading a signal frame from user space we need to explicitly set used_vsr/used_vr/used_spe to make them consistent with the MSR bits from the signal frame.

For example, CRIU, which uses sigreturn to restore a checkpointed process, can produce a case where the MSR[VSX] bit is active in the signal frame but the used_vsr bit is not set in the kernel (the same applies to VR/SPE).

This patch fixes this by always setting the used_* bit when the related MSR bit is active in the signal frame and we are doing sigreturn.

Based on a proposal by Benh.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
[mpe: Massage change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
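In short, the pattern applied at each site is: if the signal frame's MSR says a facility (VR/VSX/SPE) was in use, restore its registers and also set the matching used_* flag. A minimal sketch of the VR case, mirroring (not reproducing verbatim) the restore_user_regs() hunk in the diff below:

	/* Sketch only: if the frame's MSR says Altivec was live, restore
	 * the vector state AND mark the thread as an Altivec user, so the
	 * state keeps being saved/restored across context switches.
	 */
	if (msr & MSR_VEC) {
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;	/* the fix this patch adds */
	}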
Diffstat (limited to 'arch/powerpc/kernel/signal_32.c')
-rw-r--r--	arch/powerpc/kernel/signal_32.c	6
1 file changed, 6 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 6856276a91cc..d2745375f27e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -699,6 +699,7 @@ static long restore_user_regs(struct pt_regs *regs,
 		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
 				     sizeof(sr->mc_vregs)))
 			return 1;
+		current->thread.used_vr = true;
 	} else if (current->thread.used_vr)
 		memset(&current->thread.vr_state, 0,
 		       ELF_NVRREG * sizeof(vector128));
@@ -725,6 +726,7 @@ static long restore_user_regs(struct pt_regs *regs,
 		 */
 		if (copy_vsx_from_user(current, &sr->mc_vsregs))
 			return 1;
+		current->thread.used_vsr = true;
 	} else if (current->thread.used_vsr)
 		for (i = 0; i < 32 ; i++)
 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
@@ -744,6 +746,7 @@ static long restore_user_regs(struct pt_regs *regs,
 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
 				     ELF_NEVRREG * sizeof(u32)))
 			return 1;
+		current->thread.used_spe = true;
 	} else if (current->thread.used_spe)
 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
@@ -800,6 +803,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 				     &tm_sr->mc_vregs,
 				     sizeof(sr->mc_vregs)))
 			return 1;
+		current->thread.used_vr = true;
 	} else if (current->thread.used_vr) {
 		memset(&current->thread.vr_state, 0,
 		       ELF_NVRREG * sizeof(vector128));
@@ -833,6 +837,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 		if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
 		    copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
 			return 1;
+		current->thread.used_vsr = true;
 	} else if (current->thread.used_vsr)
 		for (i = 0; i < 32 ; i++) {
 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
@@ -849,6 +854,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
 				     ELF_NEVRREG * sizeof(u32)))
 			return 1;
+		current->thread.used_spe = true;
 	} else if (current->thread.used_spe)
 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));