summaryrefslogtreecommitdiff
path: root/arch/sparc/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-08-16 00:47:54 +0300
committerDavid S. Miller <davem@davemloft.net>2016-10-24 21:31:58 +0300
commit83a17d2661674d8c198adc0e183418f72aabab79 (patch)
treedfda84fcce63b99268c872b9264e302e4dc8f13c /arch/sparc/include
parentaa95ce361ed95c72ac42dcb315166bce5cf1a014 (diff)
downloadlinux-83a17d2661674d8c198adc0e183418f72aabab79.tar.xz
sparc64: Prepare to move to more saner user copy exception handling.
The fixup helper function mechanism for user copy fault handling is not 100% accurate, and can never be made so. We are going to transition the code to return the running return length, which is always kept track of in one or more registers of each of these routines. In order to convert them one by one, we have to allow the existing behavior to continue functioning. Therefore make all the copy code that wants the fixup helper to be used return negative one. After all of the user copy routines have been converted, this logic and the fixup helpers themselves can be removed completely. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--arch/sparc/include/asm/uaccess_64.h21
1 file changed, 15 insertions, 6 deletions
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index f8518df34b63..0244012435c8 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -198,8 +198,11 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
check_object_size(to, size, false);
ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
+ if (unlikely(ret)) {
+ if ((long)ret < 0)
+ ret = copy_from_user_fixup(to, from, size);
+ return ret;
+ }
return ret;
}
@@ -218,8 +221,11 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
check_object_size(from, size, true);
ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
+ if (unlikely(ret)) {
+ if ((long)ret < 0)
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+ }
return ret;
}
#define __copy_to_user copy_to_user
@@ -234,8 +240,11 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
{
unsigned long ret = ___copy_in_user(to, from, size);
- if (unlikely(ret))
- ret = copy_in_user_fixup(to, from, size);
+ if (unlikely(ret)) {
+ if ((long)ret < 0)
+ ret = copy_in_user_fixup(to, from, size);
+ return ret;
+ }
return ret;
}
#define __copy_in_user copy_in_user