author | Al Viro <viro@zeniv.linux.org.uk> | 2020-07-11 07:27:49 +0300
committer | Al Viro <viro@zeniv.linux.org.uk> | 2020-08-20 22:45:15 +0300
commit | c693cc4676a055c4126e487b30b0a96ea7ec9936 (patch)
tree | 8f057e3923deeffd1405e40e7ab1c056fa204189 /arch/alpha/lib
parent | 99a2c96d52d312b11a943372964226fa134de3b1 (diff)
download | linux-c693cc4676a055c4126e487b30b0a96ea7ec9936.tar.xz
saner calling conventions for csum_and_copy_..._user()
All callers of these primitives will
* discard anything we might've copied in case of error
* ignore the csum value in case of error
* always pass 0xffffffff as the initial sum, so the
resulting csum value (in case of success, that is) will never be 0.
That suggests the following calling conventions (a caller sketch follows this list):
* don't pass err_ptr - just return 0 on error.
* don't bother with zeroing destination, etc. in case of error
* don't pass the initial sum - just use 0xffffffff.
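To make the convention change concrete, here is a minimal sketch of how a caller looks before and after the conversion. It is not taken from the tree: the wrapper names old_style_caller()/new_style_caller() and their surrounding context are hypothetical, the two variants target the old and new prototypes respectively (they would not build in the same tree), and it assumes the usual declarations from <net/checksum.h>.

#include <net/checksum.h>	/* csum_and_copy_from_user() declaration */

/* Old convention (prototype as in the diff below): caller supplies the
 * initial sum and an error pointer, and must check *errp to learn
 * whether the copy faulted. */
static __wsum old_style_caller(const void __user *from, void *to, int len)
{
	int err = 0;
	__wsum csum = csum_and_copy_from_user(from, to, len,
					      (__force __wsum)~0U, &err);
	if (err)
		return 0;	/* data and csum are discarded anyway */
	return csum;
}

/* New convention: no err_ptr and no initial-sum argument; the sum is
 * seeded with 0xffffffff internally, so a return value of 0 can only
 * mean that the copy failed. */
static __wsum new_style_caller(const void __user *from, void *to, int len)
{
	__wsum csum = csum_and_copy_from_user(from, to, len);

	if (!csum)
		return 0;	/* fault while copying from userspace */
	return csum;
}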
This commit does the minimal conversion in the instances of csum_and_copy_...();
the changes of actual asm code behind them are done later in the series.
Note that this asm code is often shared with csum_partial_copy_nocheck();
the difference is that csum_partial_copy_nocheck() passes 0 for initial
sum while the csum_and_copy_..._user() variants pass 0xffffffff. Fortunately, we are
free to pass 0xffffffff in all cases and subsequent patches will use that
freedom without any special comments.
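The "free to pass 0xffffffff" point is plain ones'-complement arithmetic: seeding the accumulator with ~0 adds a "negative zero", which leaves the folded checksum equivalent but guarantees it can never come out as 0x0000. The following standalone userspace program is a model of that property only; fold() and accumulate() are illustrative helpers, not the kernel's from64to16() or csum_fold().

#include <stdint.h>
#include <stdio.h>

/* Fold an accumulator down to a 16-bit ones'-complement sum. */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Sum 16-bit words on top of an initial seed. */
static uint64_t accumulate(const uint16_t *words, int n, uint32_t seed)
{
	uint64_t sum = seed;

	for (int i = 0; i < n; i++)
		sum += words[i];
	return sum;
}

int main(void)
{
	uint16_t data[] = { 0x1234, 0xabcd, 0x0000, 0xffff };
	int n = sizeof(data) / sizeof(data[0]);

	/* Non-degenerate data: both seeds fold to the same value. */
	printf("seed 0:          0x%04x\n", fold(accumulate(data, n, 0)));
	printf("seed 0xffffffff: 0x%04x\n", fold(accumulate(data, n, 0xffffffff)));

	/* Degenerate all-zero data: seed 0 folds to 0x0000, while seed
	 * 0xffffffff folds to 0xffff -- an equivalent ones'-complement
	 * "zero", but one that cannot be mistaken for the 0 error value. */
	uint16_t zeros[4] = { 0, 0, 0, 0 };
	printf("all-zero, seed 0:          0x%04x\n",
	       fold(accumulate(zeros, 4, 0)));
	printf("all-zero, seed 0xffffffff: 0x%04x\n",
	       fold(accumulate(zeros, 4, 0xffffffff)));
	return 0;
}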
A part that could be split off: parisc and uml/i386 claimed to have
csum_and_copy_to_user() instances of their own, but those were identical
to the generic one, so we simply drop them. Not sure if it's worth
a separate commit...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'arch/alpha/lib')
-rw-r--r-- | arch/alpha/lib/csum_partial_copy.c | 25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index f363dc89fcbe..3c0e89c39ddb 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -325,30 +325,27 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
 }
 
 __wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
-                        __wsum sum, int *errp)
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
 {
-        unsigned long checksum = (__force u32) sum;
+        unsigned long checksum = ~0U;
         unsigned long soff = 7 & (unsigned long) src;
         unsigned long doff = 7 & (unsigned long) dst;
+        int err = 0;
 
         if (len) {
-                if (!access_ok(src, len)) {
-                        if (errp) *errp = -EFAULT;
-                        memset(dst, 0, len);
-                        return sum;
-                }
+                if (!access_ok(src, len))
+                        return 0;
                 if (!doff) {
                         if (!soff)
                                 checksum = csum_partial_cfu_aligned(
                                     (const unsigned long __user *) src,
                                     (unsigned long *) dst,
-                                    len-8, checksum, errp);
+                                    len-8, checksum, &err);
                         else
                                 checksum = csum_partial_cfu_dest_aligned(
                                     (const unsigned long __user *) src,
                                     (unsigned long *) dst,
-                                    soff, len-8, checksum, errp);
+                                    soff, len-8, checksum, &err);
                 } else {
                         unsigned long partial_dest;
                         ldq_u(partial_dest, dst);
@@ -357,15 +354,15 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len,
                                     (const unsigned long __user *) src,
                                     (unsigned long *) dst,
                                     doff, len-8, checksum,
-                                    partial_dest, errp);
+                                    partial_dest, &err);
                         else
                                 checksum = csum_partial_cfu_unaligned(
                                     (const unsigned long __user *) src,
                                     (unsigned long *) dst,
                                     soff, doff, len-8, checksum,
-                                    partial_dest, errp);
+                                    partial_dest, &err);
                 }
-                checksum = from64to16 (checksum);
+                checksum = err ? 0 : from64to16 (checksum);
         }
         return (__force __wsum)checksum;
 }
@@ -378,7 +375,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
         mm_segment_t oldfs = get_fs();
         set_fs(KERNEL_DS);
         checksum = csum_and_copy_from_user((__force const void __user *)src,
-                                                dst, len, 0, NULL);
+                                                dst, len);
         set_fs(oldfs);
         return checksum;
 }