Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/appldata/appldata_base.c        2
-rw-r--r--   arch/s390/include/asm/elf.h               1
-rw-r--r--   arch/s390/include/asm/spinlock.h         66
-rw-r--r--   arch/s390/include/asm/spinlock_types.h    8
-rw-r--r--   arch/s390/kernel/debug.c                  3
-rw-r--r--   arch/s390/lib/spinlock.c                 46
6 files changed, 63 insertions, 63 deletions
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 495589950dc7..5c91995b74e4 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev)
return appldata_restore(dev);
}
-static struct dev_pm_ops appldata_pm_ops = {
+static const struct dev_pm_ops appldata_pm_ops = {
.freeze = appldata_freeze,
.thaw = appldata_thaw,
.restore = appldata_restore,
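
The appldata change is a constification: a dev_pm_ops table is filled in once at build time and never modified afterwards, so marking it const lets the compiler place it in read-only data. A minimal sketch of how such a table is typically wired into a driver follows; the driver name and callbacks are placeholders, not code from appldata_base.c, and struct device_driver already takes the table through a const pointer.

    /* Hedged sketch, not from this patch: a const dev_pm_ops table in a driver. */
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int example_freeze(struct device *dev)  { return 0; }
    static int example_thaw(struct device *dev)    { return 0; }
    static int example_restore(struct device *dev) { return 0; }

    static const struct dev_pm_ops example_pm_ops = {
        .freeze  = example_freeze,
        .thaw    = example_thaw,
        .restore = example_restore,
    };

    static struct platform_driver example_driver = {
        .driver = {
            .name = "example",
            .pm   = &example_pm_ops,   /* device_driver::pm is a const pointer */
        },
    };
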
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e885442c1dfe..354d42616c7e 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -155,7 +155,6 @@ extern unsigned int vdso_enabled;
} while (0)
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..a587907d77f3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
* (the type definitions are in asm/spinlock_types.h)
*/
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) \
- _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) \
+ arch_spin_relax(lock); } while (0)
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait(lp);
+ arch_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return 1;
- return _raw_spin_trylock_retry(lp);
+ return arch_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
return _raw_write_trylock_retry(rw);
}
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
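
The rename in spinlock.h is mechanical; the locking scheme itself is untouched. The owner_cpu word is 0 when the lock is free and holds the bitwise complement of the owning CPU number while held, so both the fast-path acquire and the release are a single compare-and-swap. A rough user-space analog of that fast path, using the GCC __sync builtin in place of _raw_compare_and_swap (demo_cpu_id() is a placeholder for smp_processor_id(); none of this is kernel code):

    /* Illustrative user-space analog of the fast path above. */
    typedef struct {
        volatile unsigned int owner_cpu;   /* 0 = unlocked, ~cpu = held by cpu */
    } demo_spinlock_t;

    static inline unsigned int demo_cpu_id(void)
    {
        return 1;                          /* placeholder for smp_processor_id() */
    }

    static inline int demo_trylock(demo_spinlock_t *lp)
    {
        /* Returns the old value, like _raw_compare_and_swap: 0 means acquired. */
        return __sync_val_compare_and_swap(&lp->owner_cpu, 0,
                                           ~demo_cpu_id()) == 0;
    }

    static inline void demo_unlock(demo_spinlock_t *lp)
    {
        __sync_val_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
    }
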
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..9c76656a0af0 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
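
Freeing the raw_* names is the point of the rename: the generic spinlock layer can then wrap the architecture type under the raw_spinlock name. Roughly, and with the debug and lockdep fields omitted, the generic definitions end up looking like the sketch below (a simplified view for orientation, not part of this diff):

    /* Simplified view of the generic wrapper around the arch type. */
    typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;
    } raw_spinlock_t;

    #define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
        { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED }
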
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 071c81f179ef..0168472b2fdf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -1178,7 +1179,7 @@ debug_get_uint(char *buf)
{
int rc;
- for(; isspace(*buf); buf++);
+ buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
if(*buf){
rc = -EINVAL;
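
The debug.c hunk replaces an open-coded isspace() loop with skip_spaces(), which is why <linux/string.h> is now included. The helper's behaviour amounts to the following stand-alone equivalent (written against the user-space <ctype.h> for illustration, not the kernel's implementation):

    /* Stand-alone equivalent of what skip_spaces() does. */
    #include <ctype.h>

    static char *demo_skip_spaces(const char *str)
    {
        while (isspace((unsigned char)*str))
            ++str;                        /* advance past leading whitespace */
        return (char *)str;               /* first non-space character */
    }
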
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..10754a375668 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
_raw_yield();
}
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return;
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
local_irq_restore(flags);
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
for (count = spin_retry; count > 0; count--) {
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return 1;
}
return 0;
}
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
_raw_yield_cpu(~cpu);
}
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return 1;
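
All of the slow paths in spinlock.c share one shape: spin on the lock word for spin_retry iterations, and when the budget runs out, yield towards the current owner and refill the budget before trying the compare-and-swap again. A compressed illustration of that loop, reusing the demo_spinlock_t type from the sketch after spinlock.h (demo_yield() and the retry count of 1000 are placeholders, not the s390 code):

    /* Illustrative shape of the lock-wait slow path; not the s390 code itself. */
    static void demo_yield(void)
    {
        /* placeholder for _raw_yield_cpu(): ask that the lock owner be run */
    }

    static void demo_lock_slowpath(demo_spinlock_t *lp, unsigned int cpu)
    {
        int count = 1000;                          /* stands in for spin_retry */

        while (1) {
            if (count-- <= 0) {
                demo_yield();
                count = 1000;
            }
            if (lp->owner_cpu != 0)                /* still held: keep spinning */
                continue;
            if (__sync_val_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                return;                            /* acquired */
        }
    }
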