author     Al Viro <viro@zeniv.linux.org.uk>  2020-07-11 07:27:49 +0300
committer  Al Viro <viro@zeniv.linux.org.uk>  2020-08-20 22:45:15 +0300
commit     c693cc4676a055c4126e487b30b0a96ea7ec9936 (patch)
tree       8f057e3923deeffd1405e40e7ab1c056fa204189  /arch/x86/lib
parent     99a2c96d52d312b11a943372964226fa134de3b1 (diff)
download   linux-c693cc4676a055c4126e487b30b0a96ea7ec9936.tar.xz
saner calling conventions for csum_and_copy_..._user()
All callers of these primitives will
 * discard anything we might've copied in case of error
 * ignore the csum value in case of error
 * always pass 0xffffffff as the initial sum, so the resulting
   csum value (in case of success, that is) will never be 0.

That suggests the following calling conventions:
 * don't pass err_ptr - just return 0 on error.
 * don't bother with zeroing destination, etc. in case of error
 * don't pass the initial sum - just use 0xffffffff.

This commit does the minimal conversion in the instances of
csum_and_copy_...(); the changes of the actual asm code behind them are
done later in the series.  Note that this asm code is often shared with
csum_partial_copy_nocheck(); the difference is that
csum_partial_copy_nocheck() passes 0 for the initial sum while
csum_and_copy_..._user() pass 0xffffffff.  Fortunately, we are free to
pass 0xffffffff in all cases and subsequent patches will use that
freedom without any special comments.

A part that could be split off: parisc and uml/i386 claimed to have
csum_and_copy_to_user() instances of their own, but those were identical
to the generic one, so we simply drop them.  Not sure if it's worth a
separate commit...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
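[Editor's note] A minimal caller-side sketch of the new convention (the
wrapper function below and its name are hypothetical, not part of this
patch): because the initial sum is always 0xffffffff, a successful copy
can never yield a checksum of 0, so a zero return value unambiguously
signals a fault and no separate err_ptr is needed.

	/* Hypothetical caller, assuming the usual kernel headers. */
	static int copy_and_csum_sketch(const void __user *src, void *dst,
					int len, __wsum *csump)
	{
		__wsum csum = csum_and_copy_from_user(src, dst, len);

		if (!csum)		/* fault while copying */
			return -EFAULT;
		*csump = csum;		/* never 0 on success */
		return 0;
	}

Under the old convention the same caller would have had to pass an
initial sum plus an int *errp and check *errp separately; the return
value alone was not enough.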
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c  38
1 file changed, 16 insertions, 22 deletions
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 245f929a1c2c..ae2fb87e2274 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -22,13 +22,15 @@
*/
__wsum
csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp)
+ int len)
{
+ int err = 0;
+ __wsum isum = ~0U;
+
might_sleep();
- *errp = 0;
if (!user_access_begin(src, len))
- goto out_err;
+ return 0;
/*
* Why 6, not 7? To handle odd addresses aligned we
@@ -53,20 +55,15 @@ csum_and_copy_from_user(const void __user *src, void *dst,
}
}
isum = csum_partial_copy_generic((__force const void *)src,
- dst, len, isum, errp, NULL);
+ dst, len, isum, &err, NULL);
user_access_end();
- if (unlikely(*errp))
- goto out_err;
-
+ if (unlikely(err))
+ isum = 0;
return isum;
out:
user_access_end();
-out_err:
- *errp = -EFAULT;
- memset(dst, 0, len);
-
- return isum;
+ return 0;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -83,16 +80,15 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
*/
__wsum
csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp)
+ int len)
{
- __wsum ret;
+ __wsum ret, isum = ~0U;
+ int err = 0;
might_sleep();
- if (!user_access_begin(dst, len)) {
- *errp = -EFAULT;
+ if (!user_access_begin(dst, len))
return 0;
- }
if (unlikely((unsigned long)dst & 6)) {
while (((unsigned long)dst & 6) && len >= 2) {
@@ -107,15 +103,13 @@ csum_and_copy_to_user(const void *src, void __user *dst,
}
}
- *errp = 0;
ret = csum_partial_copy_generic(src, (void __force *)dst,
- len, isum, NULL, errp);
+ len, isum, NULL, &err);
user_access_end();
- return ret;
+ return err ? 0 : ret;
out:
user_access_end();
- *errp = -EFAULT;
- return isum;
+ return 0;
}
EXPORT_SYMBOL(csum_and_copy_to_user);