| Field | Value | Date |
| --- | --- | --- |
| author | Gerald Schaefer <geraldsc@de.ibm.com> | 2006-09-20 17:59:42 +0400 |
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2006-09-20 17:59:42 +0400 |
| commit | d02765d1af743567398eb6d523dea0ba5e5e7e8e (patch) | |
| tree | 9a39c21d9924a8d81ce85254cd3d013dbe50d23e /arch/s390 | |
| parent | 6837a8c352efcc5efc70424e9bfd94ff9bfa9a47 (diff) | |
[S390] Make user-copy operations run-time configurable.
Introduces a struct uaccess_ops which allows setting user-copy
operations at run-time.
Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
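The new indirection is visible at the bottom of the diff, where `uaccess_std` wires the mvcp/mvcs-based helpers into an ops table. As a rough sketch only (not taken verbatim from the patch; the authoritative struct definition lives in the s390 uaccess header, which is outside the arch/s390 diffstat below), the table looks something like this, with the signatures inferred from the functions defined in arch/s390/lib/uaccess_std.c:

```c
/* Sketch of the run-time selectable ops table; field order and exact
 * types are assumptions inferred from the uaccess_std initializer. */
struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, int __user *uaddr, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(int __user *uaddr, int oldval, int newval);
};
```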
Diffstat (limited to 'arch/s390')
| Mode | Path | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | arch/s390/kernel/s390_ksyms.c | 6 |
| -rw-r--r-- | arch/s390/kernel/setup.c | 7 |
| -rw-r--r-- | arch/s390/lib/Makefile | 3 |
| -rw-r--r-- | arch/s390/lib/uaccess.S | 211 |
| -rw-r--r-- | arch/s390/lib/uaccess64.S | 207 |
| -rw-r--r-- | arch/s390/lib/uaccess_std.c | 340 |

6 files changed, 348 insertions(+), 426 deletions(-)
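The setup.c hunk below installs uaccess_std as the default at boot via memcpy(). What the indirection buys is the ability to pick a different backend at run time; a minimal sketch of that idea follows (uaccess_alt and the feature test are hypothetical and not part of this patch — only the memcpy() into the global `uaccess` is taken from the diff):

```c
/* Hypothetical run-time selection enabled by this patch. */
static void __init select_uaccess_ops(void)
{
	if (machine_has_better_copy())		/* assumed feature test */
		memcpy(&uaccess, &uaccess_alt, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
}
```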
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index c73a45467fa4..9f19e833a562 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap);
 EXPORT_SYMBOL(_ni_bitmap);
 EXPORT_SYMBOL(_zb_findmap);
 EXPORT_SYMBOL(_sb_findmap);
-EXPORT_SYMBOL(__copy_from_user_asm);
-EXPORT_SYMBOL(__copy_to_user_asm);
-EXPORT_SYMBOL(__copy_in_user_asm);
-EXPORT_SYMBOL(__clear_user_asm);
-EXPORT_SYMBOL(__strncpy_from_user_asm);
-EXPORT_SYMBOL(__strnlen_user_asm);
 EXPORT_SYMBOL(diag10);
 
 /*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f2a9165ca4f8..e229af59976c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -51,6 +51,12 @@
 #include <asm/sections.h>
 
 /*
+ * User copy operations.
+ */
+struct uaccess_ops uaccess;
+EXPORT_SYMBOL_GPL(uaccess);
+
+/*
  * Machine setup..
  */
 unsigned int console_mode = 0;
@@ -641,6 +647,7 @@ setup_arch(char **cmdline_p)
 
 memory_end = memory_size;
 
+ memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
 parse_early_param();
 
 #ifndef CONFIG_64BIT
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..96c82424d88b 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,5 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o
-lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
+lib-y += delay.o string.o uaccess_std.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 837275284d9f..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * arch/s390/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
- .text
- .align 4
- .globl __copy_from_user_asm
- # %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
- slr %r0,%r0
-0: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1f
- slr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- ahi %r3,-256
-2: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1b
-3: slr %r2,%r2
- br %r14
-4: lhi %r0,-4096
- lr %r5,%r4
- slr %r5,%r0
- nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slr %r5,%r4 # %r5 = #bytes to next user page boundary
- clr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcp 0(%r5,%r2),0(%r4),%r0
- slr %r3,%r5
-6: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .long 2b,4b
- .long 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_to_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
- slr %r0,%r0
-0: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1f
- slr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- ahi %r3,-256
-2: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1b
-3: slr %r2,%r2
- br %r14
-4: lhi %r0,-4096
- lr %r5,%r4
- slr %r5,%r0
- nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slr %r5,%r4 # %r5 = #bytes to next user page boundary
- clr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcs 0(%r5,%r4),0(%r2),%r0
- slr %r3,%r5
-6: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .long 2b,4b
- .long 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_in_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
- ahi %r3,-1
- jo 6f
- sacf 256
- bras %r1,4f
-0: ahi %r3,257
-1: mvc 0(1,%r4),0(%r2)
- la %r2,1(%r2)
- la %r4,1(%r4)
- ahi %r3,-1
- jnz 1b
-2: lr %r2,%r3
- br %r14
-3: mvc 0(256,%r4),0(%r2)
- la %r2,256(%r2)
- la %r4,256(%r4)
-4: ahi %r3,-256
- jnm 3b
-5: ex %r3,4(%r1)
- sacf 0
-6: slr %r2,%r2
- br %r14
- .section __ex_table,"a"
- .long 1b,2b
- .long 3b,0b
- .long 5b,0b
- .previous
-
- .align 4
- .text
- .globl __clear_user_asm
- # %r2 = to, %r3 = n
-__clear_user_asm:
- bras %r5,0f
- .long empty_zero_page
-0: l %r5,0(%r5)
- slr %r0,%r0
-1: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2f
- slr %r2,%r2
- br %r14
-2: la %r2,256(%r2)
- ahi %r3,-256
-3: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2b
-4: slr %r2,%r2
- br %r14
-5: lhi %r0,-4096
- lr %r4,%r2
- slr %r4,%r0
- nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
- slr %r4,%r2 # %r4 = #bytes to next user page boundary
- clr %r3,%r4 # clear crosses next page boundary ?
- jnh 7f # no, the current page faulted
- # clear with the reduced length which is < 256
-6: mvcs 0(%r4,%r2),0(%r5),%r0
- slr %r3,%r4
-7: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 1b,5b
- .long 3b,5b
- .long 6b,7b
- .previous
-
- .align 4
- .text
- .globl __strncpy_from_user_asm
- # %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
- lhi %r0,0
- lr %r1,%r4
- la %r4,0(%r4) # clear high order bit from %r4
- la %r2,0(%r2,%r4) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- lr %r1,%r2
- jh 1f # \0 found in string ?
- ahi %r1,1 # include \0 in copy
-1: slr %r1,%r4 # %r1 = copy length (without \0)
- slr %r2,%r4 # %r2 = return length (including \0)
-2: mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3f
- br %r14
-3: la %r3,256(%r3)
- la %r4,256(%r4)
- ahi %r1,-256
- mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3b
- br %r14
-4: sacf 0
- lhi %r2,-EFAULT
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .previous
-
- .align 4
- .text
- .globl __strnlen_user_asm
- # %r2 = count, %r3 = src
-__strnlen_user_asm:
- lhi %r0,0
- lr %r1,%r3
- la %r3,0(%r3) # clear high order bit from %r4
- la %r2,0(%r2,%r3) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- ahi %r2,1 # strnlen_user result includes the \0
- # or return count+1 if \0 not found
- slr %r2,%r3
- br %r14
-2: sacf 0
- slr %r2,%r2 # return 0 on exception
- br %r14
- .section __ex_table,"a"
- .long 0b,2b
- .previous
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 1f755be22f92..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * arch/s390x/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
- .text
- .align 4
- .globl __copy_from_user_asm
- # %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
- slgr %r0,%r0
-0: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1f
- slgr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- aghi %r3,-256
-2: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1b
-3: slgr %r2,%r2
- br %r14
-4: lghi %r0,-4096
- lgr %r5,%r4
- slgr %r5,%r0
- ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slgr %r5,%r4 # %r5 = #bytes to next user page boundary
- clgr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcp 0(%r5,%r2),0(%r4),%r0
- slgr %r3,%r5
-6: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .quad 2b,4b
- .quad 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_to_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
- slgr %r0,%r0
-0: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1f
- slgr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- aghi %r3,-256
-2: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1b
-3: slgr %r2,%r2
- br %r14
-4: lghi %r0,-4096
- lgr %r5,%r4
- slgr %r5,%r0
- ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slgr %r5,%r4 # %r5 = #bytes to next user page boundary
- clgr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcs 0(%r5,%r4),0(%r2),%r0
- slgr %r3,%r5
-6: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .quad 2b,4b
- .quad 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_in_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
- aghi %r3,-1
- jo 6f
- sacf 256
- bras %r1,4f
-0: aghi %r3,257
-1: mvc 0(1,%r4),0(%r2)
- la %r2,1(%r2)
- la %r4,1(%r4)
- aghi %r3,-1
- jnz 1b
-2: lgr %r2,%r3
- br %r14
-3: mvc 0(256,%r4),0(%r2)
- la %r2,256(%r2)
- la %r4,256(%r4)
-4: aghi %r3,-256
- jnm 3b
-5: ex %r3,4(%r1)
- sacf 0
-6: slgr %r2,%r2
- br %r14
- .section __ex_table,"a"
- .quad 1b,2b
- .quad 3b,0b
- .quad 5b,0b
- .previous
-
- .align 4
- .text
- .globl __clear_user_asm
- # %r2 = to, %r3 = n
-__clear_user_asm:
- slgr %r0,%r0
- larl %r5,empty_zero_page
-1: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2f
- slgr %r2,%r2
- br %r14
-2: la %r2,256(%r2)
- aghi %r3,-256
-3: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2b
-4: slgr %r2,%r2
- br %r14
-5: lghi %r0,-4096
- lgr %r4,%r2
- slgr %r4,%r0
- ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
- slgr %r4,%r2 # %r4 = #bytes to next user page boundary
- clgr %r3,%r4 # clear crosses next page boundary ?
- jnh 7f # no, the current page faulted
- # clear with the reduced length which is < 256
-6: mvcs 0(%r4,%r2),0(%r5),%r0
- slgr %r3,%r4
-7: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 1b,5b
- .quad 3b,5b
- .quad 6b,7b
- .previous
-
- .align 4
- .text
- .globl __strncpy_from_user_asm
- # %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
- lghi %r0,0
- lgr %r1,%r4
- la %r2,0(%r2,%r4) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- lgr %r1,%r2
- jh 1f # \0 found in string ?
- aghi %r1,1 # include \0 in copy
-1: slgr %r1,%r4 # %r1 = copy length (without \0)
- slgr %r2,%r4 # %r2 = return length (including \0)
-2: mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3f
- br %r14
-3: la %r3,256(%r3)
- la %r4,256(%r4)
- aghi %r1,-256
- mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3b
- br %r14
-4: sacf 0
- lghi %r2,-EFAULT
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .previous
-
- .align 4
- .text
- .globl __strnlen_user_asm
- # %r2 = count, %r3 = src
-__strnlen_user_asm:
- lghi %r0,0
- lgr %r1,%r3
- la %r2,0(%r2,%r3) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- aghi %r2,1 # strnlen_user result includes the \0
- # or return count+1 if \0 not found
- slgr %r2,%r3
- br %r14
-2: sacf 0
- slgr %r2,%r2 # return 0 on exception
- br %r14
- .section __ex_table,"a"
- .quad 0b,2b
- .previous
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..9a4d4a29ea79
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,340 @@
+/*
+ * arch/s390/lib/uaccess_std.c
+ *
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *            Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI "ahi"
+#define ALR "alr"
+#define CLR "clr"
+#define LHI "lhi"
+#define SLR "slr"
+#else
+#define AHI "aghi"
+#define ALR "algr"
+#define CLR "clgr"
+#define LHI "lghi"
+#define SLR "slgr"
+#endif
+
+size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -256UL;
+ asm volatile(
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ " jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcp 0(%0,%2),0(%1),%3\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcp 0(%4,%2),0(%1),%3\n"
+ " "SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = 0UL;
+ asm volatile(
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ " "SLR" %0,%0\n"
+ " j 3f\n"
+ "1: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 3f\n"
+ "2: mvcp 0(%4,%2),0(%1),%3\n"
+ " "SLR" %0,%4\n"
+ "3:\n"
+ EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -256UL;
+ asm volatile(
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ " jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcs 0(%0,%1),0(%2),%3\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcs 0(%4,%1),0(%2),%3\n"
+ " "SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = 0UL;
+ asm volatile(
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ " "SLR" %0,%0\n"
+ " j 3f\n"
+ "1: la %4,255(%1)\n" /* ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* (ptr + 255) & -4096UL */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 3f\n"
+ "2: mvcs 0(%4,%1),0(%2),%3\n"
+ " "SLR" %0,%4\n"
+ "3:\n"
+ EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
+{
+ unsigned long tmp1;
+
+ asm volatile(
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " sacf 256\n"
+ " bras %3,3f\n"
+ "0:"AHI" %0,257\n"
+ "1: mvc 0(1,%1),0(%2)\n"
+ " la %1,1(%1)\n"
+ " la %2,1(%2)\n"
+ " "AHI" %0,-1\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "2: mvc 0(256,%1),0(%2)\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,1b-0b(%3)\n"
+ " sacf 0\n"
+ "5: "SLR" %0,%0\n"
+ "6:\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t clear_user_std(size_t size, void __user *to)
+{
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " sacf 256\n"
+ " bras %3,3f\n"
+ " xc 0(1,%1),0(%1)\n"
+ "0:"AHI" %0,257\n"
+ " la %2,255(%1)\n" /* %2 = ptr + 255 */
+ " srl %2,12\n"
+ " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
+ " "SLR" %2,%1\n"
+ " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " jnh 5f\n"
+ " "AHI" %2,-1\n"
+ "1: ex %2,0(%3)\n"
+ " "AHI" %2,1\n"
+ " "SLR" %0,%2\n"
+ " j 5f\n"
+ "2: xc 0(256,%1),0(%1)\n"
+ " la %1,256(%1)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,0(%3)\n"
+ " sacf 0\n"
+ "5: "SLR" %0,%0\n"
+ "6:\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t strnlen_user_std(size_t size, const char __user *src)
+{
+ register unsigned long reg0 asm("0") = 0UL;
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " la %2,0(%1)\n"
+ " la %3,0(%0,%1)\n"
+ " "SLR" %0,%0\n"
+ " sacf 256\n"
+ "0: srst %3,%2\n"
+ " jo 0b\n"
+ " la %0,1(%3)\n" /* strnlen_user results includes \0 */
+ " "SLR" %0,%1\n"
+ "1: sacf 0\n"
+ EX_TABLE(0b,1b)
+ : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+{
+ register unsigned long reg0 asm("0") = 0UL;
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " la %3,0(%1)\n"
+ " la %4,0(%0,%1)\n"
+ " sacf 256\n"
+ "0: srst %4,%3\n"
+ " jo 0b\n"
+ " sacf 0\n"
+ " la %0,0(%4)\n"
+ " jh 1f\n" /* found \0 in string ? */
+ " "AHI" %4,1\n" /* include \0 in copy */
+ "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
+ " "SLR" %4,%1\n" /* %4 = copy length (including \0) */
+ "2: mvcp 0(%4,%2),0(%1),%5\n"
+ " jz 9f\n"
+ "3:"AHI" %4,-256\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "4: mvcp 0(%4,%2),0(%1),%5\n"
+ " jnz 3b\n"
+ " j 9f\n"
+ "7: sacf 0\n"
+ "8:"LHI" %0,%6\n"
+ "9:\n"
+ EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
+ : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
+ return size;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+ asm volatile( \
+ " sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1:"insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
+ " lhi %0,0\n" \
+ "4: sacf 0\n" \
+ EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+ "=m" (*uaddr) \
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
+
+int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
+{
+ int oldval = 0, newval, ret;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("lr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("lr %2,%1\nar %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("lr %2,%1\nor %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ dec_preempt_count();
+ *old = oldval;
+ return ret;
+}
+
+int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
+{
+ int ret;
+
+ asm volatile(
+ " sacf 256\n"
+ " cs %1,%4,0(%5)\n"
+ "0: lr %0,%1\n"
+ "1: sacf 0\n"
+ EX_TABLE(0b,1b)
+ : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+ : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ : "cc", "memory" );
+ return ret;
+}
+
+struct uaccess_ops uaccess_std = {
+ .copy_from_user = copy_from_user_std,
+ .copy_from_user_small = copy_from_user_std_small,
+ .copy_to_user = copy_to_user_std,
+ .copy_to_user_small = copy_to_user_std_small,
+ .copy_in_user = copy_in_user_std,
+ .clear_user = clear_user_std,
+ .strnlen_user = strnlen_user_std,
+ .strncpy_from_user = strncpy_from_user_std,
+ .futex_atomic_op = futex_atomic_op,
+ .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};