Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c	105
1 file changed, 52 insertions(+), 53 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..034a35a3e9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-void arch_spin_relax(arch_spinlock_t *lp)
-{
- unsigned int cpu = lp->lock;
- if (cpu != 0) {
- if (MACHINE_IS_VM || MACHINE_IS_KVM ||
- !smp_vcpu_scheduled(~cpu))
- smp_yield_cpu(~cpu);
- }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int count;
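
The ~cpu seen in the yield calls above follows the s390 convention that a lock word holds the one's complement of the owning CPU id, so the value 0 always means "unlocked" even though CPU numbering starts at 0, and ~lock recovers the owner. A minimal user-space sketch of just that encoding (encode_owner/decode_owner are illustrative names, not kernel APIs):

#include <assert.h>

/* Illustrative only: the lock word stores ~cpu, so CPU 0 encodes to
 * 0xffffffff and a word of 0 is unambiguously "free". */
static unsigned int encode_owner(unsigned int cpu) { return ~cpu; }
static unsigned int decode_owner(unsigned int word) { return ~word; }

int main(void)
{
	assert(encode_owner(0) != 0);			/* CPU 0 still encodes nonzero */
	assert(decode_owner(encode_owner(5)) == 5);	/* round-trips */
	return 0;
}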
@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
- unsigned int old;
+ unsigned int owner, old;
int count = spin_retry;
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
+ owner = ACCESS_ONCE(rw->owner);
if ((int) old < 0)
continue;
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
-{
- unsigned int old;
- int count = spin_retry;
-
- local_irq_restore(flags);
- while (1) {
- if (count-- <= 0) {
- smp_yield();
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0)
- continue;
- local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, old, old + 1))
- return;
- local_irq_restore(flags);
- }
-}
-EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
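
For orientation, the layout the reader path above relies on: bit 31 of rw->lock is the writer bit and the low 31 bits count readers, so a reader spins while the word is negative and otherwise bumps the count with compare-and-swap. A minimal user-space sketch of that loop in C11 atomics (a sketch of the technique, not the kernel code):

#include <stdatomic.h>

/* Sketch, assuming the rwlock layout above: spin while a writer holds
 * or claims the lock, otherwise try to add one reader. */
static void read_lock_sketch(_Atomic unsigned int *lock)
{
	unsigned int old;

	for (;;) {
		old = atomic_load(lock);
		if ((int) old < 0)
			continue;	/* writer bit set, keep spinning */
		if (atomic_compare_exchange_weak(lock, &old, old + 1))
			return;		/* reader count bumped, lock held */
	}
}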
@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(arch_rwlock_t *rw)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
- unsigned int old;
+ unsigned int owner, old;
int count = spin_retry;
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
- return;
+ owner = ACCESS_ONCE(rw->owner);
+ smp_rmb();
+ if ((int) old >= 0) {
+ prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+ old = prev;
+ }
+ if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ break;
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
- unsigned int old;
+ unsigned int owner, old, prev;
int count = spin_retry;
- local_irq_restore(flags);
+ prev = 0x80000000;
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
- return;
- local_irq_restore(flags);
+ owner = ACCESS_ONCE(rw->owner);
+ if ((int) old >= 0 &&
+ _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+ prev = old;
+ else
+ smp_rmb();
+ if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ break;
}
}
-EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
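
On z196 and newer machines the writer no longer loops on compare-and-swap: it sets bit 31 once with an interlocked load-and-OR (__RAW_LOCK with __RAW_OP_OR) and then only waits for the reader count to drain, with prev telling it whether another writer already owned the bit. A rough user-space analogue using atomic_fetch_or (a sketch of the idea, not the s390 instruction sequence):

#include <stdatomic.h>

/* Sketch: claim the writer bit with one atomic OR, then wait until all
 * readers are gone and the bit was ours (prev had no writer bit). */
static void write_lock_sketch(_Atomic unsigned int *lock)
{
	unsigned int prev, old;

	prev = atomic_fetch_or(lock, 0x80000000u);
	for (;;) {
		old = atomic_load(lock);
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;		/* readers drained, bit was ours */
		if ((int) old >= 0)	/* earlier writer released: re-claim */
			prev = atomic_fetch_or(lock, 0x80000000u);
	}
}

The apparent point of this path, compared with the compare-and-swap fallback above, is that a waiting writer touches the line once to claim the bit instead of retrying a full-word compare-and-swap against every reader update.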
@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+ if (!cpu)
+ return;
+ if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+ return;
+ smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
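
arch_lock_relax takes the raw lock word, i.e. the complemented CPU id of the current owner, and yields the caller's CPU to that owner only when yielding can help: never for a free lock, and not on LPAR when the owning vCPU is already running. A compilable sketch of the same control flow (vcpu_scheduled/yield_to are hypothetical stand-ins for smp_vcpu_scheduled/smp_yield_cpu, with the LPAR test folded into the scheduled check):

#include <stdbool.h>

/* Hypothetical stand-ins; in the kernel these are smp_vcpu_scheduled()
 * and smp_yield_cpu(). */
static bool vcpu_scheduled(unsigned int cpu) { (void)cpu; return false; }
static void yield_to(unsigned int cpu) { (void)cpu; }

/* Directed yield: 'cpu' is the raw lock word, the one's complement of
 * the holder's CPU id, so 0 means the lock is free. */
static void lock_relax_sketch(unsigned int cpu)
{
	if (!cpu)
		return;			/* nothing to yield to */
	if (vcpu_scheduled(~cpu))
		return;			/* holder is running; keep spinning */
	yield_to(~cpu);			/* hand our timeslice to the holder */
}

Passing the raw word keeps the uncontended path free of any owner bookkeeping; only the contended retry loops pay for the decode and the hypervisor call.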