| author | Andi Kleen <ak@linux.intel.com> | 2013-08-17 01:17:19 +0400 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2013-09-11 02:27:43 +0400 |
| commit | ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff | |
| tree | 9a41335f282ba7851abf625fb295369aaa6061d9 /arch/x86 | |
| parent | 6e4664525b1db28f8c4e1130957f70a94c19213e | |
x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
The 64bit __copy_{from,to}_user_inatomic always called
copy_user_generic, skipping the special optimizations for 1/2/4/8
byte accesses.
This especially hurts the futex code, which accesses the 4 byte futex
value in user memory through a complicated fast-string operation in an
out-of-line function call, instead of a single movl.
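For illustration, a minimal, hypothetical userspace analogue of that size dispatch is sketched below (the real kernel code uses __get_user_asm()/__put_user_asm() with exception tables; this sketch only shows the __builtin_constant_p() special-casing, and every name in it is made up):

/*
 * Hypothetical userspace sketch (not kernel code): a compile-time
 * constant size lets the copy collapse to a single load/store instead
 * of an out-of-line generic copy routine.
 */
#include <stdint.h>
#include <string.h>

/* stand-in for copy_user_generic(): out-of-line "fast string" copy */
static int copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

static inline __attribute__((always_inline))
int copy_nocheck(void *dst, const void *src, unsigned size)
{
	/* non-constant sizes still take the generic path */
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);
	switch (size) {
	case 1: *(uint8_t  *)dst = *(const uint8_t  *)src; return 0;
	case 2: *(uint16_t *)dst = *(const uint16_t *)src; return 0;
	case 4: *(uint32_t *)dst = *(const uint32_t *)src; return 0; /* one movl */
	case 8: *(uint64_t *)dst = *(const uint64_t *)src; return 0;
	default: return copy_generic(dst, src, size);
	}
}

int main(void)
{
	uint32_t fval, uval = 42;
	/* sizeof(fval) is a compile-time constant, so this becomes a single 32-bit move */
	return copy_nocheck(&fval, &uval, sizeof(fval));
}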
Use __copy_{from,to}_user for the _inatomic variants instead to get the
same optimizations. The only problem was the might_fault() annotation in
those functions, which does not belong in atomic context. So move that
annotation into new wrappers and call __copy_{from,to}_user_nocheck()
from the *_inatomic variants directly.
32bit already did this correctly by duplicating the code.
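Schematically, the patch results in the following split (a simplified sketch with hypothetical userspace names; the real definitions are in the uaccess_64.h hunks below):

#include <string.h>

/* stand-in for the size-dispatching fast path (__copy_from_user_nocheck) */
static inline int my_copy_from_user_nocheck(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

/* checked variant: keeps the sleep/debug annotation */
static inline int my_copy_from_user(void *dst, const void *src, unsigned size)
{
	/* in the kernel: might_fault(), an annotation that a page fault
	 * (which may sleep) is acceptable here, so it is wrong in atomic context */
	return my_copy_from_user_nocheck(dst, src, size);
}

/* atomic-context variant: same fast path, no might_fault() */
static inline int my_copy_from_user_inatomic(void *dst, const void *src, unsigned size)
{
	return my_copy_from_user_nocheck(dst, src, size);
}

int main(void)
{
	int a = 0, b = 7;
	return my_copy_from_user(&a, &b, sizeof(a));
}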
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/uaccess_64.h | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923dd0007..64476bb2a146 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,