Diffstat (limited to 'arch/xtensa/include/asm')
 -rw-r--r--  arch/xtensa/include/asm/Kbuild            |   2
 -rw-r--r--  arch/xtensa/include/asm/atomic.h          | 124
 -rw-r--r--  arch/xtensa/include/asm/bitops.h          | 323
 -rw-r--r--  arch/xtensa/include/asm/cache.h           |   6
 -rw-r--r--  arch/xtensa/include/asm/cmpxchg.h         |  71
 -rw-r--r--  arch/xtensa/include/asm/fixmap.h          |   8
 -rw-r--r--  arch/xtensa/include/asm/futex.h           |  10
 -rw-r--r--  arch/xtensa/include/asm/hw_irq.h          |  14
 -rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h  |   3
 -rw-r--r--  arch/xtensa/include/asm/io.h              |  12
 -rw-r--r--  arch/xtensa/include/asm/kmem_layout.h     |  29
 -rw-r--r--  arch/xtensa/include/asm/page.h            |  11
 -rw-r--r--  arch/xtensa/include/asm/pgtable.h         |   4
 -rw-r--r--  arch/xtensa/include/asm/platform.h        |  27
 -rw-r--r--  arch/xtensa/include/asm/processor.h       |   3
 -rw-r--r--  arch/xtensa/include/asm/syscall.h         |   4
 -rw-r--r--  arch/xtensa/include/asm/uaccess.h         |  94
 -rw-r--r--  arch/xtensa/include/asm/user.h            |  20
 -rw-r--r--  arch/xtensa/include/asm/vectors.h         |  44
 19 files changed, 318 insertions(+), 491 deletions(-)
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index ffa0cf7f66c3..3acc31e55e02 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
+generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7b00d26f472e..3e7c6134ed32 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
} \
@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- " " #op " %0, %1, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
} \
@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- " " #op " %0, %0, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15, "__stringify(TOPLEVEL)"\n"\
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " rsil a15, "__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
} \
@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %3, 0\n" \
- " " #op " %1, %0, %2\n" \
- " s32i %1, %3, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[tmp], %[result], %[i]\n" \
+ " s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
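The atomic.h hunks above only rename positional asm operands (%0..%3) to named ones; the generated code is unchanged. As a hedged illustration (not literal preprocessor output), ATOMIC_OP(add) in the XCHAL_HAVE_EXCLUSIVE branch expands roughly to:

        static inline void atomic_add(int i, atomic_t *v)
        {
                int tmp;
                int result;

                __asm__ __volatile__(
                        "1:     l32ex   %[tmp], %[addr]\n"      /* exclusive load of v->counter */
                        "       add     %[result], %[tmp], %[i]\n"
                        "       s32ex   %[result], %[addr]\n"   /* exclusive store */
                        "       getex   %[result]\n"            /* fetch store-exclusive status */
                        "       beqz    %[result], 1b\n"        /* retry if the store lost the race */
                        : [result] "=&a" (result), [tmp] "=&a" (tmp)
                        : [i] "a" (i), [addr] "a" (v)
                        : "memory");
        }
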
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index aeb15f4c755b..3f71d364ba90 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " or %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " and %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (~mask), "a" (p)
- : "memory");
-}
-
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " xor %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (~mask), "a" (p)
- : "memory");
-}
-
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " or %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " and %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " xor %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " "insn" %[tmp], %[tmp], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[value], %[addr]\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+ \
+ return value & mask; \
}
#elif XCHAL_HAVE_S32C1I
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+ \
+ return tmp & mask; \
}
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
+#else
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#include <asm-generic/bitops/atomic.h>
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#endif /* XCHAL_HAVE_S32C1I */
-#else
+#define BIT_OPS(op, insn, inv) \
+ BIT_OP(op, insn, inv) \
+ TEST_AND_BIT_OP(op, insn, inv)
-#include <asm-generic/bitops/atomic.h>
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
-#endif /* XCHAL_HAVE_S32C1I */
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
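The bitops.h rewrite collapses six hand-written functions into two generator macros; BIT_OPS() then stamps out set/clear/change plus their test_and_ variants, with the `inv` argument pasted in front of `mask` so the and-based ops receive the inverted mask. A rough expansion of BIT_OPS(clear, "and", ~) under XCHAL_HAVE_EXCLUSIVE, for illustration only:

        static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
        {
                unsigned long tmp;
                unsigned long mask = 1UL << (bit & 31);

                p += bit >> 5;          /* select the 32-bit word holding the bit */

                __asm__ __volatile__(
                        "1:     l32ex   %[tmp], %[addr]\n"
                        "       and     %[tmp], %[tmp], %[mask]\n"
                        "       s32ex   %[tmp], %[addr]\n"
                        "       getex   %[tmp]\n"
                        "       beqz    %[tmp], 1b\n"
                        : [tmp] "=&a" (tmp)
                        : [mask] "a" (~mask), [addr] "a" (p)    /* `inv` == ~ here */
                        : "memory");
        }
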
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index b21fd133ff62..54e147ac26bf 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -31,4 +31,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+/*
+ * R/O after init is actually writable, it cannot go to .rodata
+ * according to vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
#endif /* _XTENSA_CACHE_H */
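The override above trades the sealed-after-boot protection of .rodata for a cache-friendly placement. A minimal usage sketch (the variable name is hypothetical):

        /* Written once during boot, read-only by convention afterwards; on
         * xtensa this lands in .data..read_mostly via the fallback above,
         * so late writes are not trapped as they would be in true .rodata. */
        static unsigned long xtensa_boot_param __ro_after_init;
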
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 7ccc5cbf441b..a175f8aec3fb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " bne %0, %4, 2f\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
+ "1: l32ex %[result], %[addr]\n"
+ " bne %[result], %[cmp], 2f\n"
+ " mov %[tmp], %[new]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
"2:\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (new), "a" (p), "a" (old)
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
: "memory"
);
return result;
#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
- " wsr %2, scompare1\n"
- " s32c1i %0, %1, 0\n"
- : "+a" (new)
- : "a" (p), "a" (old)
+ " wsr %[cmp], scompare1\n"
+ " s32c1i %[new], %[mem]\n"
+ : [new] "+a" (new), [mem] "+m" (*p)
+ : [cmp] "a" (old)
: "memory"
);
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " bne %0, %2, 1f\n"
- " s32i %3, %1, 0\n"
+ " l32i %[old], %[mem]\n"
+ " bne %[old], %[cmp], 1f\n"
+ " s32i %[new], %[mem]\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
+ : [old] "=&a" (old), [mem] "+m" (*p)
+ : [cmp] "a" (old), [new] "r" (new)
: "a15", "memory");
return old;
#endif
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (val), "a" (m)
+ "1: l32ex %[result], %[addr]\n"
+ " mov %[tmp], %[val]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [val] "a" (val), [addr] "a" (m)
: "memory"
);
@@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32i %1, %2, 0\n"
- " mov %0, %3\n"
- " wsr %1, scompare1\n"
- " s32c1i %0, %2, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (m), "a" (val)
+ "1: l32i %[tmp], %[mem]\n"
+ " mov %[result], %[val]\n"
+ " wsr %[tmp], scompare1\n"
+ " s32c1i %[result], %[mem]\n"
+ " bne %[result], %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp),
+ [mem] "+m" (*m)
+ : [val] "a" (val)
: "memory"
);
return result;
@@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " s32i %2, %1, 0\n"
+ " l32i %[tmp], %[mem]\n"
+ " s32i %[val], %[mem]\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
+ : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+ : [val] "a" (val)
: "a15", "memory");
return tmp;
#endif
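All three __cmpxchg_u32/xchg_u32 variants keep the same contract: return the value found at *p, storing the new value only if the old one matched. A minimal caller sketch built on that contract (saturating_inc and its arguments are hypothetical):

        static int saturating_inc(atomic_t *counter, int limit)
        {
                int old, new;

                do {
                        old = atomic_read(counter);
                        if (old >= limit)
                                return old;     /* saturated, no store */
                        new = old + 1;
                        /* atomic_cmpxchg() returns the prior value; retry on mismatch */
                } while (atomic_cmpxchg(counter, old, new) != old);

                return new;
        }
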
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 7e25c1b50ac0..cfb8696917e9 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
- pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
- (vaddr) \
- )
+ pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr))
#endif
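The extra p4d_offset() level follows from dropping __ARCH_USE_5LEVEL_HACK in pgtable.h (see below). A step-by-step equivalent of the macro, as a sketch:

        /* On xtensa the p4d, pud and pmd levels are folded, so each
         * *_offset() call below collapses to a cast at compile time. */
        static pte_t *fixmap_pte(unsigned long vaddr)
        {
                pgd_t *pgd = pgd_offset_k(vaddr);
                p4d_t *p4d = p4d_offset(pgd, vaddr);
                pud_t *pud = pud_offset(p4d, vaddr);
                pmd_t *pmd = pmd_offset(pud, vaddr);

                return pte_offset_kernel(pmd, vaddr);
        }
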
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 0c4457ca0a85..964611083224 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
- "1: l32i %[oldval], %[addr], 0\n" \
+ "1: l32i %[oldval], %[mem]\n" \
insn "\n" \
" wsr %[oldval], scompare1\n" \
- "2: s32c1i %[newval], %[addr], 0\n" \
+ "2: s32c1i %[newval], %[mem]\n" \
" bne %[newval], %[oldval], 1b\n" \
" movi %[newval], 0\n" \
"3:\n" \
@@ -60,9 +60,9 @@
" .section __ex_table,\"a\"\n" \
" .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
- : [oldval] "=&r" (old), [newval] "=&r" (ret) \
- : [addr] "r" (uaddr), [oparg] "r" (arg), \
- [fault] "I" (-EFAULT) \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+ [mem] "+m" (*(uaddr)) \
+ : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
: "memory")
#endif
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
deleted file mode 100644
index 3ddbea759b2b..000000000000
--- a/arch/xtensa/include/asm/hw_irq.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * include/asm-xtensa/hw_irq.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_HW_IRQ_H
-#define _XTENSA_HW_IRQ_H
-
-#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 3b054d2bede0..e3e1d9a1ef69 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
+#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@@ -183,7 +184,7 @@
#endif
#if XCHAL_HAVE_MPU
- .data
+ __REFCONST
.align 4
.Lattribute_table:
.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 988e08530a5c..54188e69b988 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -32,8 +32,7 @@ void xtensa_iounmap(volatile void __iomem *addr);
/*
* Return the virtual address for the specified bus memory.
*/
-static inline void __iomem *ioremap_nocache(unsigned long offset,
- unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
if (offset >= XCHAL_KIO_PADDR
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
@@ -52,15 +51,6 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
return xtensa_ioremap_cache(offset, size);
}
#define ioremap_cache ioremap_cache
-#define ioremap_nocache ioremap_nocache
-
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
-
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
- return ioremap_nocache(offset, size);
-}
static inline void iounmap(volatile void __iomem *addr)
{
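With ioremap_nocache removed treewide in this kernel generation, ioremap() itself is now the uncached primitive. A typical caller, with hypothetical physical address, size and register layout:

        static int probe_device(void)
        {
                void __iomem *regs = ioremap(0xf0001000, 0x100);
                u32 id;

                if (!regs)
                        return -ENOMEM;
                id = readl(regs);       /* MMIO read through the uncached mapping */
                iounmap(regs);
                return id ? 0 : -ENODEV;
        }
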
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 9c12babc016c..7cbf68ca7106 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H
+#include <asm/core.h>
#include <asm/types.h>
#ifdef CONFIG_MMU
@@ -65,6 +66,34 @@
#endif
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else
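The KIO block is moved here verbatim from vectors.h (see below) so the constants are visible wherever the kernel memory layout is needed. For orientation, the ioremap() bounds check in io.h above consults them roughly like this sketch:

        static bool addr_in_kio(unsigned long paddr)
        {
                return paddr >= XCHAL_KIO_PADDR &&
                       paddr - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE;
        }
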
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 09c56cba442e..f4771c29c7e9 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
if (off >= XCHAL_KSEG_SIZE)
off -= XCHAL_KSEG_SIZE;
+#ifndef CONFIG_XIP_KERNEL
return off + PHYS_OFFSET;
+#else
+ if (off < XCHAL_KSEG_SIZE)
+ return off + PHYS_OFFSET;
+
+ off -= XCHAL_KSEG_SIZE;
+ if (off >= XCHAL_KIO_SIZE)
+ off -= XCHAL_KIO_SIZE;
+
+ return off + XCHAL_KIO_PADDR;
+#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else
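For XIP kernels the cached/bypass KSEG pair is followed by a KIO-mapped image, so ___pa() first peels off one KSEG-sized alias and then classifies what remains. A worked example with hypothetical values (PAGE_OFFSET = 0xd0000000, XCHAL_KSEG_SIZE = 0x08000000, PHYS_OFFSET = 0, XCHAL_KIO_PADDR = 0xf0000000):

        /*
         * ___pa(0xd0001000): off = 0x00001000, below KSEG_SIZE -> 0x00001000 (RAM)
         * ___pa(0xd8001000): off = 0x08001000, common fold -> 0x00001000 (RAM,
         *                    bypass alias)
         * ___pa(0xe0001000): off = 0x10001000, common fold -> 0x08001000; still
         *                    >= KSEG_SIZE in the XIP branch, so subtract again
         *                    -> 0x00001000, return 0x00001000 + 0xf0000000
         *                    = 0xf0001000 (flash)
         */
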
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 3f7fe5a8c286..27ac17c9da09 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 913826dfa838..f2c48522c5a1 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -65,31 +65,4 @@ extern void platform_calibrate_ccount (void);
*/
void cpu_reset(void) __attribute__((noreturn));
-/*
- * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following set of functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled. Default implementations do nothing and issue a warning.
- */
-
-/*
- * Check whether p points to a cached memory.
- */
-bool platform_vaddr_cached(const void *p);
-
-/*
- * Check whether p points to an uncached memory.
- */
-bool platform_vaddr_uncached(const void *p);
-
-/*
- * Return pointer to an uncached view of the cached sddress p.
- */
-void *platform_vaddr_to_uncached(void *p);
-
-/*
- * Return pointer to a cached view of the uncached sddress p.
- */
-void *platform_vaddr_to_cached(void *p);
-
#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 7495520d7a3e..6fa903daf2a2 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
do { \
+ unsigned long syscall = (regs)->syscall; \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@ struct thread_struct {
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
- (regs)->syscall = NO_SYSCALL; \
+ (regs)->syscall = syscall; \
} while (0)
/* Forward declaration */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 359ab40e935a..f9a671cbf933 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->areg[0] = (long) error ? error : val;
+ regs->areg[2] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
@@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->areg[reg[i]] = args[i];
}
-asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
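The return-value fix matches the xtensa syscall convention, where a2 (regs->areg[2]) carries the syscall number in and the result out; areg[0] holds the saved a0 and was the wrong slot. For reference, the companion getter in this header reads the same register (sketched from the surrounding API):

        static inline long syscall_get_return_value(struct task_struct *task,
                                                    struct pt_regs *regs)
        {
                return regs->areg[2];
        }
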
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..47b7702aaa40 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@@ -132,14 +132,14 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
- " _bbci.l %3, 0, 1f \n" \
- " movi %0, %4 \n" \
+ " _bbci.l %[mem] * 0, 1f \n" \
+ " movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
- " _bbsi.l %3, 0, 0f \n" \
- " _bbci.l %3, 1, 1f \n" \
- "0: movi %0, %4 \n" \
+ " _bbsi.l %[mem] * 0, 0f \n" \
+ " _bbci.l %[mem] * 0 + 1, 1f \n" \
+ "0: movi %[err], %[efault] \n" \
" _j 2f \n"
@@ -151,40 +151,40 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __put_user_asm(x, addr, err, align, insn, cb) \
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
+ "1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
- " movi %1, 2b \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
- :"=r" (err), "=r" (cb) \
- :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+ :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+ :[x] "r"(x_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = 0; \
__gu_err; \
})
@@ -198,8 +198,17 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
- default: (x) = __get_user_bad(); \
+ case 8: { \
+ u64 __x; \
+ if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
+ retval = -EFAULT; \
+ (x) = 0; \
+ } else { \
+ (x) = *(__force __typeof__((ptr)))&__x; \
+ } \
+ break; \
+ } \
+ default: (x) = 0; __get_user_bad(); \
} \
} while (0)
@@ -208,25 +217,28 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__( \
- __check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
- "2: \n" \
- " .section .fixup,\"ax\" \n" \
- " .align 4 \n" \
- " .literal_position \n" \
- "5: \n" \
- " movi %1, 2b \n" \
- " movi %2, 0 \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " .long 1b, 5b \n" \
- " .previous" \
- :"=r" (err), "=r" (cb), "=r" (x) \
- :"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do { \
+ u32 __x = 0; \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[mem] \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+ :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
+ (x_) = (__force __typeof__(*(addr_)))__x; \
+} while (0)
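The 64-bit __get_user case above now bounces through a kernel-side u64, so a faulting copy yields -EFAULT and a zeroed destination rather than the raw __copy_from_user() byte count. A caller-side sketch (names hypothetical):

        static long read_user_u64(u64 __user *uptr, u64 *out)
        {
                u64 val;

                if (get_user(val, uptr))        /* -EFAULT on fault, val zeroed */
                        return -EFAULT;
                *out = val;
                return 0;
        }
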
/*
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
deleted file mode 100644
index 2c3ed23354a8..000000000000
--- a/arch/xtensa/include/asm/user.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-xtensa/user.h
- *
- * Xtensa Processor version.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_USER_H
-#define _XTENSA_USER_H
-
-/* This file usually defines a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on Xtensa.
- */
-
-#endif /* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 79fe3007919e..fd99b25037a7 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>
-#if XCHAL_HAVE_PTP_MMU
-#define XCHAL_KIO_CACHED_VADDR 0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
-#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
#else
-#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
-#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
-#endif
-#define XCHAL_KIO_SIZE 0x10000000
-
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
-#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
- return xtensa_kio_paddr;
-}
-#endif
-#else
-#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
-#endif
-
-#if defined(CONFIG_MMU)
-
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-/* Image Virtual Start Address */
-#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
- CONFIG_KERNEL_LOAD_ADDRESS - \
+#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
+ XCHAL_KSEG_CACHED_VADDR - \
XCHAL_KSEG_PADDR)
+#endif
#else
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
-#else /* !defined(CONFIG_MMU) */
- /* MMU Not being used - Virtual == Physical */
-
-/* Location of the start of the kernel text, _start */
-#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
-
-
-#endif /* CONFIG_MMU */
-
#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
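
With the KIO constants relocated, vectors.h reduces to picking KERNELOFFSET: an explicit CONFIG_KERNEL_VIRTUAL_ADDRESS wins; otherwise the physical load address is relocated into the cached KSEG window. Worked arithmetic with hypothetical values (CONFIG_KERNEL_LOAD_ADDRESS = 0x00003000, XCHAL_KSEG_CACHED_VADDR = 0xd0000000, XCHAL_KSEG_PADDR = 0x00000000):

        KERNELOFFSET = CONFIG_KERNEL_LOAD_ADDRESS + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_PADDR
                     = 0x00003000 + 0xd0000000 - 0x00000000
                     = 0xd0003000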