Diffstat (limited to 'arch/s390/lib')

 arch/s390/lib/Makefile        |   1
 arch/s390/lib/delay.c         |  11
 arch/s390/lib/div64.c         | 151
 arch/s390/lib/spinlock.c      |  62
 arch/s390/lib/uaccess_mvcos.c |  22
 arch/s390/lib/uaccess_std.c   |  36
 6 files changed, 237 insertions(+), 46 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index c42ffedfdb49..b0cfa6c4883d 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -5,5 +5,6 @@
EXTRA_AFLAGS := -traditional
lib-y += delay.o string.o uaccess_std.o
+lib-$(CONFIG_32BIT) += div64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
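
For context: div64.o is built only for 31-bit kernels because 64-bit s390 has a native 64-bit divide, while on 32-bit architectures the generic do_div() macro falls back to __div64_32(), which this patch adds below. A minimal usage sketch of that macro (generic kernel API, not part of this patch):

#include <linux/types.h>
#include <asm/div64.h>		/* do_div(); backed by __div64_32 on 31 bit */

static inline u32 ns_to_us_sketch(u64 ns)
{
	/* do_div() divides its first argument in place and
	 * returns the 32-bit remainder. */
	do_div(ns, 1000);
	return (u32) ns;
}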
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 468f4ea33f99..027c4742a001 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -27,9 +27,7 @@ void __delay(unsigned long loops)
* yield the megahertz number of the cpu. The important function
* is udelay and that is done using the tod clock. -- martin.
*/
- __asm__ __volatile__(
- "0: brct %0,0b"
- : /* no outputs */ : "r" ((loops/2) + 1));
+ asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
/*
@@ -38,13 +36,12 @@ void __delay(unsigned long loops)
*/
void __udelay(unsigned long usecs)
{
- uint64_t start_cc, end_cc;
+ uint64_t start_cc;
if (usecs == 0)
return;
- asm volatile ("STCK %0" : "=m" (start_cc));
+ start_cc = get_clock();
do {
cpu_relax();
- asm volatile ("STCK %0" : "=m" (end_cc));
- } while (((end_cc - start_cc)/4096) < usecs);
+ } while (((get_clock() - start_cc)/4096) < usecs);
}
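
The rewritten __udelay() leans on the s390 TOD (time-of-day) clock: bit 51 of the TOD value ticks once per microsecond, so the division by 4096 above (a shift right by 12) converts TOD units to microseconds. A standalone sketch of the same loop, assuming a get_clock() STCK wrapper like the one the patch switches to:

#include <stdint.h>

/* STCK stores the 64-bit TOD clock; bit 51 == 1 microsecond */
static inline uint64_t get_clock_sketch(void)
{
	uint64_t clk;

	asm volatile("stck %0" : "=m" (clk) : : "cc");
	return clk;
}

static void udelay_sketch(unsigned long usecs)
{
	uint64_t start = get_clock_sketch();

	/* delta >> 12 == delta / 4096: TOD units -> microseconds */
	while (((get_clock_sketch() - start) >> 12) < usecs)
		;	/* the kernel loop calls cpu_relax() here */
}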
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
new file mode 100644
index 000000000000..0481f3424a13
--- /dev/null
+++ b/arch/s390/lib/div64.c
@@ -0,0 +1,151 @@
+/*
+ * arch/s390/lib/div64.c
+ *
+ * __div64_32 implementation for 31 bit.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_MARCH_G5
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 31 bit integer using signed 64/32 bit division.
+ */
+static uint32_t __div64_31(uint64_t *n, uint32_t base)
+{
+ register uint32_t reg2 asm("2");
+ register uint32_t reg3 asm("3");
+ uint32_t *words = (uint32_t *) n;
+ uint32_t tmp;
+
+ /* Special case base==1, remainder = 0, quotient = n */
+ if (base == 1)
+ return 0;
+ /*
+ * Special case base==0 will cause a fixed point divide exception
+ * on the dr instruction and should not happen anyway. For the
+ * following calculation we can assume base > 1. The first
+ * signed 64 / 32 bit division with an upper half of 0 will
+ * give the correct upper half of the 64 bit quotient.
+ */
+ reg2 = 0UL;
+ reg3 = words[0];
+ asm volatile(
+ " dr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[0] = reg3;
+ reg3 = words[1];
+ /*
+ * To get the lower half of the 64 bit quotient and the 32 bit
+ * remainder we have to use a little trick. Since we only have
+ * a signed division the quotient can get too big. To avoid this
+ * the 64 bit dividend is halved, then the signed division will
+ * work. Afterwards the quotient and the remainder are doubled.
+ * If the last bit of the dividend has been one the remainder
+ * is increased by one then checked against the base. If the
+ * remainder has overflown subtract base and increase the
+ * quotient. Simple, no?
+ */
+ asm volatile(
+ " nr %2,%1\n"
+ " srdl %0,1\n"
+ " dr %0,%3\n"
+ " alr %0,%0\n"
+ " alr %1,%1\n"
+ " alr %0,%2\n"
+ " clr %0,%3\n"
+ " jl 0f\n"
+ " slr %0,%3\n"
+ " alr %1,%2\n"
+ "0:\n"
+ : "+d" (reg2), "+d" (reg3), "=d" (tmp)
+ : "d" (base), "2" (1UL) : "cc" );
+ words[1] = reg3;
+ return reg2;
+}
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 32 bit integer using the unsigned 64/31 bit division.
+ */
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ uint32_t r;
+
+ /*
+ * If the most significant bit of base is set, divide n by
+ * (base/2). That allows use of the 64/31 bit division and gives a
+ * good approximation of the result: n = (base/2)*q + r. The
+ * result needs to be corrected with two simple transformations.
+ * If base is already < 2^31-1 __div64_31 can be used directly.
+ */
+ r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
+ if ((signed) base < 0) {
+ uint64_t q = *n;
+ /*
+ * First transformation:
+ * n = (base/2)*q + r
+ * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
+ * Since r < (base/2), r + (base/2) < base.
+ * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
+ * n = ((base/2)*2)*q1 + r1 with r1 < base.
+ */
+ if (q & 1)
+ r += base/2;
+ q >>= 1;
+ /*
+ * Second transformation. ((base/2)*2) could have lost the
+ * last bit.
+ * n = ((base/2)*2)*q1 + r1
+ * = base*q1 - ((base&1) ? q1 : 0) + r1
+ */
+ if (base & 1) {
+ int64_t rx = r - q;
+ /*
+ * base is >= 2^31. The worst case for the while
+ * loop is n=2^64-1 base=2^31+1. That gives a
+ * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
+ * base >= 2^31 the loop is finished after a maximum
+ * of three iterations.
+ */
+ while (rx < 0) {
+ rx += base;
+ q--;
+ }
+ r = rx;
+ }
+ *n = q;
+ }
+ return r;
+}
+
+#else /* MARCH_G5 */
+
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ register uint32_t reg2 asm("2");
+ register uint32_t reg3 asm("3");
+ uint32_t *words = (uint32_t *) n;
+
+ reg2 = 0UL;
+ reg3 = words[0];
+ asm volatile(
+ " dlr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[0] = reg3;
+ reg3 = words[1];
+ asm volatile(
+ " dlr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[1] = reg3;
+ return reg2;
+}
+
+#endif /* MARCH_G5 */
+
+EXPORT_SYMBOL(__div64_32);
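
The arithmetic in __div64_32() above is easier to check in portable C. The sketch below reproduces the two correction transformations with the hardware 64/31-bit divide replaced by ordinary C division (div31_sketch is a stand-in, and the names are mine, not the kernel's); the test case in main() is the worst case cited in the comment, where the fixup loop runs its maximum of three iterations:

#include <assert.h>
#include <stdint.h>

/* stand-in for the 64/31-bit hardware divide; base < 2^31 assumed */
static uint32_t div31_sketch(uint64_t *n, uint32_t base)
{
	uint32_t r = (uint32_t)(*n % base);

	*n /= base;
	return r;
}

static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
{
	uint64_t q;
	uint32_t r;

	if (!(base & 0x80000000u))		/* fits in 31 bits */
		return div31_sketch(n, base);

	r = div31_sketch(n, base / 2);		/* n = (base/2)*q + r */
	q = *n;
	if (q & 1)				/* first transformation: */
		r += base / 2;			/* fold q's low bit into r */
	q >>= 1;			/* n = ((base/2)*2)*q + r, r < base */
	if (base & 1) {			/* second: ((base/2)*2) == base-1 */
		int64_t rx = (int64_t)r - (int64_t)q;

		while (rx < 0) {		/* at most 3 iterations */
			rx += base;
			q--;
		}
		r = (uint32_t)rx;
	}
	*n = q;
	return r;
}

int main(void)
{
	uint64_t n = 0xffffffffffffffffULL;		/* 2^64 - 1 */
	uint32_t r = div64_32_sketch(&n, 0x80000001u);	/* 2^31 + 1 */

	assert(n == 0x1fffffffcULL && r == 3);
	return 0;
}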
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b9b7958a226a..8d76403fcf89 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -24,57 +24,76 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
-static inline void
-_diag44(void)
+static inline void _raw_yield(void)
{
-#ifdef CONFIG_64BIT
if (MACHINE_HAS_DIAG44)
-#endif
asm volatile("diag 0,0,0x44");
}
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+static inline void _raw_yield_cpu(int cpu)
+{
+ if (MACHINE_HAS_DIAG9C)
+ asm volatile("diag %0,0,0x9c"
+ : : "d" (__cpu_logical_map[cpu]));
+ else
+ _raw_yield();
+}
+
+void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
{
int count = spin_retry;
+ unsigned int cpu = ~smp_processor_id();
while (1) {
if (count-- <= 0) {
- _diag44();
+ unsigned int owner = lp->owner_cpu;
+ if (owner != 0)
+ _raw_yield_cpu(~owner);
count = spin_retry;
}
if (__raw_spin_is_locked(lp))
continue;
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+ lp->owner_pc = pc;
return;
+ }
}
}
EXPORT_SYMBOL(_raw_spin_lock_wait);
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
{
- int count = spin_retry;
+ unsigned int cpu = ~smp_processor_id();
+ int count;
- while (count-- > 0) {
+ for (count = spin_retry; count > 0; count--) {
if (__raw_spin_is_locked(lp))
continue;
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+ lp->owner_pc = pc;
return 1;
+ }
}
return 0;
}
EXPORT_SYMBOL(_raw_spin_trylock_retry);
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_spin_relax(raw_spinlock_t *lock)
+{
+ unsigned int cpu = lock->owner_cpu;
+ if (cpu != 0)
+ _raw_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(_raw_spin_relax);
+
+void _raw_read_lock_wait(raw_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ _raw_yield();
count = spin_retry;
}
if (!__raw_read_can_lock(rw))
@@ -86,8 +105,7 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(raw_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -103,14 +121,13 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(raw_rwlock_t *rw)
{
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ _raw_yield();
count = spin_retry;
}
if (!__raw_write_can_lock(rw))
@@ -121,8 +138,7 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(raw_rwlock_t *rw)
{
int count = spin_retry;
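
The ones' complement in ~smp_processor_id() keeps the lock word nonzero even for CPU 0, so a contended waiter can read the owner back out of the lock and yield its timeslice directly to that CPU (diag 0x9c) instead of yielding blindly (diag 0x44). A user-space C11 sketch of that shape, with sched_yield() standing in for the diagnose instructions:

#define _GNU_SOURCE
#include <sched.h>
#include <stdatomic.h>

struct sketch_lock {
	atomic_uint owner_cpu;	/* 0 = free, else ~(owner cpu id) */
};

/* stand-in for "diag %0,0,0x9c"; the hypervisor donates the timeslice
 * to the named cpu, here we can only yield generically */
static void yield_to_cpu_sketch(unsigned int cpu)
{
	(void) cpu;
	sched_yield();
}

static void lock_sketch(struct sketch_lock *lp, int spin_retry)
{
	unsigned int me = ~(unsigned int) sched_getcpu();
	int count = spin_retry;

	for (;;) {
		unsigned int free = 0;

		if (atomic_compare_exchange_strong(&lp->owner_cpu,
						   &free, me))
			return;			/* lock acquired */
		if (count-- <= 0) {
			unsigned int owner = atomic_load(&lp->owner_cpu);

			if (owner != 0)		/* undo the complement */
				yield_to_cpu_sketch(~owner);
			count = spin_retry;
		}
	}
}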
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 86c96d6c191a..121b2935a422 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -35,7 +35,7 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
tmp1 = -4096UL;
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
- " jz 4f\n"
+ " jz 7f\n"
"1:"ALR" %0,%3\n"
" "SLR" %1,%3\n"
" "SLR" %2,%3\n"
@@ -44,13 +44,23 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
+ " jnh 4f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
" "SLR" %0,%4\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ " "ALR" %2,%4\n"
+ "4:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,6f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "5: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "6:"AHI" %4,-256\n"
+ " jnm 5b\n"
+ " ex %4,0(%3)\n"
+ " j 8f\n"
+ "7:"SLR" %0,%0\n"
+ "8: \n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
return size;
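
What the extra labels buy: previously a fault mid-copy just returned the residual count, leaving the tail of the kernel buffer untouched; the new 4:/5:/6: path clears exactly the bytes that could not be copied, which is the documented copy_from_user() contract. The same contract as a C sketch (raw_copy_sketch is a hypothetical stand-in for the faulting mvcos copy done inline above):

#include <stddef.h>
#include <string.h>

/* hypothetical primitive: copies n bytes, returns how many were NOT
 * copied because a fault was hit (0 on full success) */
static size_t raw_copy_sketch(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);	/* stand-in: never faults here */
	return 0;
}

static size_t copy_from_user_sketch(void *to, const void *from, size_t n)
{
	size_t uncopied = raw_copy_sketch(to, from, n);

	if (uncopied)		/* fault: zero the uncopied tail so the
				 * caller never sees stale kernel data */
		memset((char *) to + (n - uncopied), 0, uncopied);
	return uncopied;
}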
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 9a4d4a29ea79..f44f0078b354 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -35,25 +35,35 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
tmp1 = -256UL;
asm volatile(
"0: mvcp 0(%0,%2),0(%1),%3\n"
- " jz 5f\n"
+ " jz 8f\n"
"1:"ALR" %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
" jnz 1b\n"
- " j 5f\n"
+ " j 8f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" "LHI" %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
+ " jnh 5f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
" "SLR" %0,%4\n"
- " j 6f\n"
- "5:"SLR" %0,%0\n"
- "6: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ " "ALR" %2,%4\n"
+ "5:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,7f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "6: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "7:"AHI" %4,-256\n"
+ " jnm 6b\n"
+ " ex %4,0(%3)\n"
+ " j 9f\n"
+ "8:"SLR" %0,%0\n"
+ "9: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
return size;
@@ -67,16 +77,22 @@ size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
asm volatile(
"0: mvcp 0(%0,%2),0(%1),%3\n"
" "SLR" %0,%0\n"
- " j 3f\n"
+ " j 5f\n"
"1: la %4,255(%1)\n" /* %4 = ptr + 255 */
" "LHI" %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 3f\n"
+ " jnh 5f\n"
"2: mvcp 0(%4,%2),0(%1),%3\n"
" "SLR" %0,%4\n"
- "3:\n"
+ " "ALR" %2,%4\n"
+ "3:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,4f\n"
+ " xc 0(1,%2),0(%2)\n"
+ "4: ex %4,0(%3)\n"
+ "5:\n"
EX_TABLE(0b,1b) EX_TABLE(2b,3b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");