diff options
| author | Christophe Leroy (CS GROUP) <chleroy@kernel.org> | 2026-03-10 13:01:31 +0300 |
|---|---|---|
| committer | Madhavan Srinivasan <maddy@linux.ibm.com> | 2026-04-01 06:51:07 +0300 |
| commit | bf53ede0038fe2a7b02cad85f337aba43ced572a (patch) | |
| tree | 2e2a0c19e5f9aa78cf67c2fddf5f7310e201414d | |
| parent | 679fa9c756c7d6fcb6ae611f695d286c53dca076 (diff) | |
| download | linux-bf53ede0038fe2a7b02cad85f337aba43ced572a.tar.xz | |
powerpc/align: Convert emulate_spe() to scoped user access
Commit 861574d51bbd ("powerpc/uaccess: Implement masked user access")
provides optimised user access by avoiding the cost of access_ok().
Convert emulate_spe() to scoped user access to benefit from masked
user access.
Scoped user access also makes the code simpler.
Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/4ff83cb240da4e2d0c34e2bce4b8b6ef19a33777.1773136880.git.chleroy@kernel.org
| -rw-r--r-- | arch/powerpc/kernel/align.c | 75 |
1 file changed, 33 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 3e37ece06739..61409431138f 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -165,25 +165,23 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, temp.ll = data.ll = 0; p = addr; - if (!user_read_access_begin(addr, nb)) - return -EFAULT; - - switch (nb) { - case 8: - unsafe_get_user(temp.v[0], p++, Efault_read); - unsafe_get_user(temp.v[1], p++, Efault_read); - unsafe_get_user(temp.v[2], p++, Efault_read); - unsafe_get_user(temp.v[3], p++, Efault_read); - fallthrough; - case 4: - unsafe_get_user(temp.v[4], p++, Efault_read); - unsafe_get_user(temp.v[5], p++, Efault_read); - fallthrough; - case 2: - unsafe_get_user(temp.v[6], p++, Efault_read); - unsafe_get_user(temp.v[7], p++, Efault_read); + scoped_user_read_access_size(addr, nb, efault) { + switch (nb) { + case 8: + unsafe_get_user(temp.v[0], p++, efault); + unsafe_get_user(temp.v[1], p++, efault); + unsafe_get_user(temp.v[2], p++, efault); + unsafe_get_user(temp.v[3], p++, efault); + fallthrough; + case 4: + unsafe_get_user(temp.v[4], p++, efault); + unsafe_get_user(temp.v[5], p++, efault); + fallthrough; + case 2: + unsafe_get_user(temp.v[6], p++, efault); + unsafe_get_user(temp.v[7], p++, efault); + } } - user_read_access_end(); switch (instr) { case EVLDD: @@ -252,25 +250,23 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, if (flags & ST) { p = addr; - if (!user_write_access_begin(addr, nb)) - return -EFAULT; - - switch (nb) { - case 8: - unsafe_put_user(data.v[0], p++, Efault_write); - unsafe_put_user(data.v[1], p++, Efault_write); - unsafe_put_user(data.v[2], p++, Efault_write); - unsafe_put_user(data.v[3], p++, Efault_write); - fallthrough; - case 4: - unsafe_put_user(data.v[4], p++, Efault_write); - unsafe_put_user(data.v[5], p++, Efault_write); - fallthrough; - case 2: - unsafe_put_user(data.v[6], p++, Efault_write); - unsafe_put_user(data.v[7], p++, Efault_write); + 
scoped_user_write_access_size(addr, nb, efault) { + switch (nb) { + case 8: + unsafe_put_user(data.v[0], p++, efault); + unsafe_put_user(data.v[1], p++, efault); + unsafe_put_user(data.v[2], p++, efault); + unsafe_put_user(data.v[3], p++, efault); + fallthrough; + case 4: + unsafe_put_user(data.v[4], p++, efault); + unsafe_put_user(data.v[5], p++, efault); + fallthrough; + case 2: + unsafe_put_user(data.v[6], p++, efault); + unsafe_put_user(data.v[7], p++, efault); + } } - user_write_access_end(); } else { *evr = data.w[0]; regs->gpr[reg] = data.w[1]; @@ -278,12 +274,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, return 1; -Efault_read: - user_read_access_end(); - return -EFAULT; - -Efault_write: - user_write_access_end(); +efault: return -EFAULT; } #endif /* CONFIG_SPE */ |
