author      Arnd Bergmann <arnd@arndb.de>   2015-01-15 01:17:49 +0300
committer   Arnd Bergmann <arnd@arndb.de>   2015-01-15 01:17:49 +0300
commit      643165c8bbc8617d8222cb50c89e34fe64d226cf (patch)
tree        e2a88de9b8efb0c9fa2436f53b0a3ab4948c4a55 /arch/sparc/include/asm
parent      eaa27f34e91a14cdceed26ed6c6793ec1d186115 (diff)
parent      0795cb1b46e7938ed679ccd48f933e75272b30e3 (diff)
download    linux-643165c8bbc8617d8222cb50c89e34fe64d226cf.tar.xz
Merge tag 'uaccess_for_upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost into asm-generic
Merge "uaccess: fix sparse warning on get/put_user for bitwise types" from Michael S. Tsirkin:
At the moment, if p and x are both tagged as bitwise types,
some of get_user(x, p), put_user(x, p), __get_user(x, p), __put_user(x, p)
might produce a sparse warning on many architectures.
This is a false positive: *p on these architectures is loaded into long
(typically using asm), then cast back to typeof(*p).
When typeof(*p) is a bitwise type (which is uncommon), such a cast needs
__force, otherwise sparse produces a warning.
Some architectures already have the __force tag; add it where it's missing.
I verified that adding these __force casts does not suppress any useful warnings.
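As a concrete illustration of the pattern, a minimal sketch (the macro name and
body here are illustrative only, not any architecture's actual implementation;
__force is the annotation from <linux/compiler.h>, which expands to nothing
unless sparse's __CHECKER__ is defined):

        /*
         * Illustrative only: the real macros load *ptr in inline asm and
         * also produce an error code.  The value travels through a plain
         * unsigned long, so both casts need __force when typeof(*ptr) is
         * a bitwise type; without them, sparse warns on each cast.
         */
        #define __get_user_sketch(x, ptr) ({                    \
                unsigned long __gu_val;                         \
                __gu_val = (__force unsigned long)*(ptr);       \
                (x) = (__force __typeof__(*(ptr)))__gu_val;     \
                0;                                              \
        })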
Specifically, vhost wants to read/write bitwise types in userspace memory
using get_user/put_user.
At the moment this triggers sparse errors, since the value is passed through an
integer.
For example:

        __le32 __user *p;
        __u32 x;

both

        put_user(x, p);

and

        get_user(x, p);

should be safe, but produce warnings on some architectures.
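The same situation can be reproduced in a standalone file and run through
sparse directly; the __bitwise/__force stand-ins below mirror the kernel's
definitions, and the names le32_t, to_bitwise and from_bitwise are invented
for the demo:

        #ifdef __CHECKER__                      /* defined when sparse runs */
        # define __bitwise __attribute__((bitwise))
        # define __force   __attribute__((force))
        #else
        # define __bitwise
        # define __force
        #endif

        typedef unsigned int __bitwise le32_t;  /* stand-in for __le32 */

        /* The get_user() direction: plain integer -> bitwise type.
         * Dropping __force here makes sparse warn about a cast to a
         * restricted type. */
        le32_t to_bitwise(unsigned long raw)
        {
                return (__force le32_t)raw;
        }

        /* The put_user() direction: bitwise type -> plain integer. */
        unsigned long from_bitwise(le32_t v)
        {
                return (__force unsigned long)v;
        }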
While there, I noticed that a bunch of architectures violated
coding style rules within uaccess macros.
Included patches to fix them up.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
* tag 'uaccess_for_upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (37 commits)
sparc32: nocheck uaccess coding style tweaks
sparc64: nocheck uaccess coding style tweaks
xtensa: macro whitespace fixes
sh: macro whitespace fixes
parisc: macro whitespace fixes
m68k: macro whitespace fixes
m32r: macro whitespace fixes
frv: macro whitespace fixes
cris: macro whitespace fixes
avr32: macro whitespace fixes
arm64: macro whitespace fixes
arm: macro whitespace fixes
alpha: macro whitespace fixes
blackfin: macro whitespace fixes
sparc64: uaccess_64 macro whitespace fixes
sparc32: uaccess_32 macro whitespace fixes
avr32: whitespace fix
sh: fix put_user sparse errors
metag: fix put_user sparse errors
ia64: fix put_user sparse errors
...
Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--   arch/sparc/include/asm/uaccess_32.h   339
-rw-r--r--   arch/sparc/include/asm/uaccess_64.h   222
2 files changed, 319 insertions, 242 deletions
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 9634d086fc56..64ee103dc29d 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -37,7 +37,7 @@
 #define get_fs() (current->thread.current_ds)
 #define set_fs(val) ((current->thread.current_ds) = (val))
 
-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
  * can be fairly lightweight.
@@ -46,8 +46,8 @@
  */
 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
-#define access_ok(type, addr, size) \
+#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
+#define access_ok(type, addr, size) \
 	({ (void)(type); __access_ok((unsigned long)(addr), size); })
 
 /*
@@ -91,158 +91,221 @@ void __ret_efault(void);
  * of a performance impact. Thus we have a few rather ugly macros here,
  * and hide all the ugliness from the user.
  */
-#define put_user(x,ptr) ({ \
-unsigned long __pu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
-
-#define get_user(x,ptr) ({ \
-unsigned long __gu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
+#define put_user(x, ptr) ({ \
+        unsigned long __pu_addr = (unsigned long)(ptr); \
+        __chk_user_ptr(ptr); \
+        __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
+})
+
+#define get_user(x, ptr) ({ \
+        unsigned long __gu_addr = (unsigned long)(ptr); \
+        __chk_user_ptr(ptr); \
+        __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
+})
 
 /*
  * The "__xxx" versions do not do address space checking, useful when
  * doing multiple accesses to the same area (the user has to do the
  * checks by hand with "access_ok()")
  */
-#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
+#define __put_user(x, ptr) \
+        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
 
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) ((struct __large_struct __user *)(x))
 
-#define __put_user_check(x,addr,size) ({ \
-register int __pu_ret; \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(x,,addr,__pu_ret); break; \
-case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} } else { __pu_ret = -EFAULT; } __pu_ret; })
-
-#define __put_user_nocheck(x,addr,size) ({ \
-register int __pu_ret; \
-switch (size) { \
-case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(x,,addr,__pu_ret); break; \
-case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} __pu_ret; })
-
-#define __put_user_asm(x,size,addr,ret) \
+#define __put_user_check(x, addr, size) ({ \
+        register int __pu_ret; \
+        if (__access_ok(addr, size)) { \
+                switch (size) { \
+                case 1: \
+                        __put_user_asm(x, b, addr, __pu_ret); \
+                        break; \
+                case 2: \
+                        __put_user_asm(x, h, addr, __pu_ret); \
+                        break; \
+                case 4: \
+                        __put_user_asm(x, , addr, __pu_ret); \
+                        break; \
+                case 8: \
+                        __put_user_asm(x, d, addr, __pu_ret); \
+                        break; \
+                default: \
+                        __pu_ret = __put_user_bad(); \
+                        break; \
+                } \
+        } else { \
+                __pu_ret = -EFAULT; \
+        } \
+        __pu_ret; \
+})
+
+#define __put_user_nocheck(x, addr, size) ({ \
+        register int __pu_ret; \
+        switch (size) { \
+        case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
+        case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
+        case 4: __put_user_asm(x, , addr, __pu_ret); break; \
+        case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
+        default: __pu_ret = __put_user_bad(); break; \
+        } \
+        __pu_ret; \
+})
+
+#define __put_user_asm(x, size, addr, ret) \
 __asm__ __volatile__( \
-        "/* Put user asm, inline. */\n" \
-"1:\t" "st"#size " %1, %2\n\t" \
-        "clr %0\n" \
-"2:\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "b 2b\n\t" \
-        " mov %3, %0\n\t" \
-        ".previous\n\n\t" \
-        ".section __ex_table,#alloc\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\t" \
-        ".previous\n\n\t" \
-        : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
-          "i" (-EFAULT))
+        "/* Put user asm, inline. */\n" \
+        "1:\t" "st"#size " %1, %2\n\t" \
+        "clr %0\n" \
+        "2:\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "b 2b\n\t" \
+        " mov %3, %0\n\t" \
+        ".previous\n\n\t" \
+        ".section __ex_table,#alloc\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\t" \
+        ".previous\n\n\t" \
+        : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
+          "i" (-EFAULT))
 
 int __put_user_bad(void);
 
-#define __get_user_check(x,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
-
-#define __get_user_check_ret(x,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} x = (type) __gu_val; } else return retval; })
-
-#define __get_user_nocheck(x,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} x = (type) __gu_val; __gu_ret; })
-
-#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} x = (type) __gu_val; })
-
-#define __get_user_asm(x,size,addr,ret) \
+#define __get_user_check(x, addr, size, type) ({ \
+        register int __gu_ret; \
+        register unsigned long __gu_val; \
+        if (__access_ok(addr, size)) { \
+                switch (size) { \
+                case 1: \
+                        __get_user_asm(__gu_val, ub, addr, __gu_ret); \
+                        break; \
+                case 2: \
+                        __get_user_asm(__gu_val, uh, addr, __gu_ret); \
+                        break; \
+                case 4: \
+                        __get_user_asm(__gu_val, , addr, __gu_ret); \
+                        break; \
+                case 8: \
+                        __get_user_asm(__gu_val, d, addr, __gu_ret); \
+                        break; \
+                default: \
+                        __gu_val = 0; \
+                        __gu_ret = __get_user_bad(); \
+                        break; \
+                } \
+        } else { \
+                __gu_val = 0; \
+                __gu_ret = -EFAULT; \
+        } \
+        x = (__force type) __gu_val; \
+        __gu_ret; \
+})
+
+#define __get_user_check_ret(x, addr, size, type, retval) ({ \
+        register unsigned long __gu_val __asm__ ("l1"); \
+        if (__access_ok(addr, size)) { \
+                switch (size) { \
+                case 1: \
+                        __get_user_asm_ret(__gu_val, ub, addr, retval); \
+                        break; \
+                case 2: \
+                        __get_user_asm_ret(__gu_val, uh, addr, retval); \
+                        break; \
+                case 4: \
+                        __get_user_asm_ret(__gu_val, , addr, retval); \
+                        break; \
+                case 8: \
+                        __get_user_asm_ret(__gu_val, d, addr, retval); \
+                        break; \
+                default: \
+                        if (__get_user_bad()) \
+                                return retval; \
+                } \
+                x = (__force type) __gu_val; \
+        } else \
+                return retval; \
+})
+
+#define __get_user_nocheck(x, addr, size, type) ({ \
+        register int __gu_ret; \
+        register unsigned long __gu_val; \
+        switch (size) { \
+        case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
+        case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
+        case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \
+        case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \
+        default: \
+                __gu_val = 0; \
+                __gu_ret = __get_user_bad(); \
+                break; \
+        } \
+        x = (__force type) __gu_val; \
+        __gu_ret; \
+})
+
+#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \
+        register unsigned long __gu_val __asm__ ("l1"); \
+        switch (size) { \
+        case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
+        case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
+        case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \
+        case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \
+        default: \
+                if (__get_user_bad()) \
+                        return retval; \
+        } \
+        x = (__force type) __gu_val; \
+})
+
+#define __get_user_asm(x, size, addr, ret) \
 __asm__ __volatile__( \
-        "/* Get user asm, inline. */\n" \
-"1:\t" "ld"#size " %2, %1\n\t" \
-        "clr %0\n" \
-"2:\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "clr %1\n\t" \
-        "b 2b\n\t" \
-        " mov %3, %0\n\n\t" \
-        ".previous\n\t" \
-        ".section __ex_table,#alloc\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\n\t" \
-        ".previous\n\t" \
-        : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
-          "i" (-EFAULT))
-
-#define __get_user_asm_ret(x,size,addr,retval) \
+        "/* Get user asm, inline. */\n" \
+        "1:\t" "ld"#size " %2, %1\n\t" \
+        "clr %0\n" \
+        "2:\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "clr %1\n\t" \
+        "b 2b\n\t" \
+        " mov %3, %0\n\n\t" \
+        ".previous\n\t" \
+        ".section __ex_table,#alloc\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\n\t" \
+        ".previous\n\t" \
+        : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
+          "i" (-EFAULT))
+
+#define __get_user_asm_ret(x, size, addr, retval) \
 if (__builtin_constant_p(retval) && retval == -EFAULT) \
-__asm__ __volatile__( \
-        "/* Get user asm ret, inline. */\n" \
-"1:\t" "ld"#size " %1, %0\n\n\t" \
-        ".section __ex_table,#alloc\n\t" \
-        ".align 4\n\t" \
-        ".word 1b,__ret_efault\n\n\t" \
-        ".previous\n\t" \
-        : "=&r" (x) : "m" (*__m(addr))); \
+        __asm__ __volatile__( \
+        "/* Get user asm ret, inline. */\n" \
+        "1:\t" "ld"#size " %1, %0\n\n\t" \
+        ".section __ex_table,#alloc\n\t" \
+        ".align 4\n\t" \
+        ".word 1b,__ret_efault\n\n\t" \
+        ".previous\n\t" \
+        : "=&r" (x) : "m" (*__m(addr))); \
 else \
-__asm__ __volatile__( \
-        "/* Get user asm ret, inline. */\n" \
-"1:\t" "ld"#size " %1, %0\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "ret\n\t" \
-        " restore %%g0, %2, %%o0\n\n\t" \
-        ".previous\n\t" \
-        ".section __ex_table,#alloc\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\n\t" \
-        ".previous\n\t" \
-        : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
+        __asm__ __volatile__( \
+        "/* Get user asm ret, inline. */\n" \
+        "1:\t" "ld"#size " %1, %0\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "ret\n\t" \
+        " restore %%g0, %2, %%o0\n\n\t" \
+        ".previous\n\t" \
+        ".section __ex_table,#alloc\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\n\t" \
+        ".previous\n\t" \
+        : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
 
 int __get_user_bad(void);
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index c990a5e577f0..a35194b7dba0 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -41,11 +41,11 @@
 #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
 #define get_ds() (KERNEL_DS)
 
-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 #define set_fs(val) \
 do { \
-        current_thread_info()->current_ds =(val).seg; \
+        current_thread_info()->current_ds = (val).seg; \
         __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
 } while(0)
 
@@ -88,121 +88,135 @@ void __retl_efault(void);
  * of a performance impact. Thus we have a few rather ugly macros here,
  * and hide all the ugliness from the user.
  */
-#define put_user(x,ptr) ({ \
-unsigned long __pu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
+#define put_user(x, ptr) ({ \
+        unsigned long __pu_addr = (unsigned long)(ptr); \
+        __chk_user_ptr(ptr); \
+        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
+})
 
-#define get_user(x,ptr) ({ \
-unsigned long __gu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
+#define get_user(x, ptr) ({ \
+        unsigned long __gu_addr = (unsigned long)(ptr); \
+        __chk_user_ptr(ptr); \
+        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
+})
 
-#define __put_user(x,ptr) put_user(x,ptr)
-#define __get_user(x,ptr) get_user(x,ptr)
+#define __put_user(x, ptr) put_user(x, ptr)
+#define __get_user(x, ptr) get_user(x, ptr)
 
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) ((struct __large_struct *)(x))
 
-#define __put_user_nocheck(data,addr,size) ({ \
-register int __pu_ret; \
-switch (size) { \
-case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
-case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} __pu_ret; })
-
-#define __put_user_asm(x,size,addr,ret) \
+#define __put_user_nocheck(data, addr, size) ({ \
+        register int __pu_ret; \
+        switch (size) { \
+        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
+        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
+        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
+        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
+        default: __pu_ret = __put_user_bad(); break; \
+        } \
+        __pu_ret; \
+})
+
+#define __put_user_asm(x, size, addr, ret) \
 __asm__ __volatile__( \
-        "/* Put user asm, inline. */\n" \
-"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
-        "clr %0\n" \
-"2:\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "sethi %%hi(2b), %0\n\t" \
-        "jmpl %0 + %%lo(2b), %%g0\n\t" \
-        " mov %3, %0\n\n\t" \
-        ".previous\n\t" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\t" \
-        ".previous\n\n\t" \
-        : "=r" (ret) : "r" (x), "r" (__m(addr)), \
-          "i" (-EFAULT))
+        "/* Put user asm, inline. */\n" \
+        "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
+        "clr %0\n" \
+        "2:\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "sethi %%hi(2b), %0\n\t" \
+        "jmpl %0 + %%lo(2b), %%g0\n\t" \
+        " mov %3, %0\n\n\t" \
+        ".previous\n\t" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\t" \
+        ".previous\n\n\t" \
+        : "=r" (ret) : "r" (x), "r" (__m(addr)), \
+          "i" (-EFAULT))
 
 int __put_user_bad(void);
 
-#define __get_user_nocheck(data,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} data = (type) __gu_val; __gu_ret; })
-
-#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} data = (type) __gu_val; })
-
-#define __get_user_asm(x,size,addr,ret) \
+#define __get_user_nocheck(data, addr, size, type) ({ \
+        register int __gu_ret; \
+        register unsigned long __gu_val; \
+        switch (size) { \
+        case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
+        case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
+        case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
+        case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
+        default: \
+                __gu_val = 0; \
+                __gu_ret = __get_user_bad(); \
+                break; \
+        } \
+        data = (__force type) __gu_val; \
+        __gu_ret; \
+})
+
+#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
+        register unsigned long __gu_val __asm__ ("l1"); \
+        switch (size) { \
+        case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
+        case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
+        case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
+        case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
+        default: \
+                if (__get_user_bad()) \
+                        return retval; \
+        } \
+        data = (__force type) __gu_val; \
+})
+
+#define __get_user_asm(x, size, addr, ret) \
 __asm__ __volatile__( \
-        "/* Get user asm, inline. */\n" \
-"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
-        "clr %0\n" \
-"2:\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "sethi %%hi(2b), %0\n\t" \
-        "clr %1\n\t" \
-        "jmpl %0 + %%lo(2b), %%g0\n\t" \
-        " mov %3, %0\n\n\t" \
-        ".previous\n\t" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\n\t" \
-        ".previous\n\t" \
-        : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
-          "i" (-EFAULT))
-
-#define __get_user_asm_ret(x,size,addr,retval) \
+        "/* Get user asm, inline. */\n" \
+        "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
+        "clr %0\n" \
+        "2:\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "sethi %%hi(2b), %0\n\t" \
+        "clr %1\n\t" \
+        "jmpl %0 + %%lo(2b), %%g0\n\t" \
+        " mov %3, %0\n\n\t" \
+        ".previous\n\t" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\n\t" \
+        ".previous\n\t" \
+        : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
+          "i" (-EFAULT))
+
+#define __get_user_asm_ret(x, size, addr, retval) \
 if (__builtin_constant_p(retval) && retval == -EFAULT) \
-__asm__ __volatile__( \
-        "/* Get user asm ret, inline. */\n" \
-"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".align 4\n\t" \
-        ".word 1b,__ret_efault\n\n\t" \
-        ".previous\n\t" \
-        : "=r" (x) : "r" (__m(addr))); \
+        __asm__ __volatile__( \
+        "/* Get user asm ret, inline. */\n" \
+        "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".align 4\n\t" \
+        ".word 1b,__ret_efault\n\n\t" \
+        ".previous\n\t" \
+        : "=r" (x) : "r" (__m(addr))); \
 else \
-__asm__ __volatile__( \
-        "/* Get user asm ret, inline. */\n" \
-"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
-        ".section .fixup,#alloc,#execinstr\n\t" \
-        ".align 4\n" \
-"3:\n\t" \
-        "ret\n\t" \
-        " restore %%g0, %2, %%o0\n\n\t" \
-        ".previous\n\t" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".align 4\n\t" \
-        ".word 1b, 3b\n\n\t" \
-        ".previous\n\t" \
-        : "=r" (x) : "r" (__m(addr)), "i" (retval))
+        __asm__ __volatile__( \
+        "/* Get user asm ret, inline. */\n" \
+        "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
+        ".section .fixup,#alloc,#execinstr\n\t" \
+        ".align 4\n" \
+        "3:\n\t" \
+        "ret\n\t" \
+        " restore %%g0, %2, %%o0\n\n\t" \
+        ".previous\n\t" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".align 4\n\t" \
+        ".word 1b, 3b\n\n\t" \
+        ".previous\n\t" \
+        : "=r" (x) : "r" (__m(addr)), "i" (retval))
 
 int __get_user_bad(void);