Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/Kbuild                                     |   2
-rw-r--r--  arch/mips/include/asm/atomic.h                                   | 561
-rw-r--r--  arch/mips/include/asm/cop2.h                                     |  18
-rw-r--r--  arch/mips/include/asm/cpu-features.h                             |  10
-rw-r--r--  arch/mips/include/asm/cpu-info.h                                 |   5
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h                             |  24
-rw-r--r--  arch/mips/include/asm/ftrace.h                                   |   4
-rw-r--r--  arch/mips/include/asm/idle.h                                     |   7
-rw-r--r--  arch/mips/include/asm/kvm_host.h                                 |  16
-rw-r--r--  arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h        |  12
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h |   1
-rw-r--r--  arch/mips/include/asm/mach-ip28/spaces.h                         |   7
-rw-r--r--  arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h      |   2
-rw-r--r--  arch/mips/include/asm/page.h                                     |   5
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h                             |  44
-rw-r--r--  arch/mips/include/asm/pgtable.h                                  |  10
-rw-r--r--  arch/mips/include/asm/processor.h                                |   6
-rw-r--r--  arch/mips/include/asm/smp.h                                      |   5
-rw-r--r--  arch/mips/include/asm/suspend.h                                  |   7
-rw-r--r--  arch/mips/include/asm/switch_to.h                                |   4
-rw-r--r--  arch/mips/include/asm/syscall.h                                  |   2
-rw-r--r--  arch/mips/include/asm/topology.h                                 |   8
-rw-r--r--  arch/mips/include/uapi/asm/ioctls.h                              |   2
-rw-r--r--  arch/mips/include/uapi/asm/ptrace.h                              |   2
-rw-r--r--  arch/mips/include/uapi/asm/swab.h                                |  18
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h                              |  18
26 files changed, 338 insertions(+), 462 deletions(-)
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 335e5290ec75..72e1cf1cab00 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,8 +1,10 @@
# MIPS headers
generic-y += cputime.h
generic-y += current.h
+generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += hash.h
+generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mutex.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2befe651a..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic_set - set atomic variable
@@ -40,195 +40,103 @@
*/
#define atomic_set(v, i) ((v)->counter = (i))
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %0, %1 # atomic_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %0, %1 # atomic_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
- int result;
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
-
- result = temp - i;
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic64_set - set atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define atomic64_set(v, i) ((v)->counter = (i))
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %0, %1 # atomic64_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %0, %1 # atomic64_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
+ : "Ir" (i), "m" (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long result;
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " daddu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic variable
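
Note on the conversion above: ATOMIC_OP()/ATOMIC_OP_RETURN() fold the four hand-written atomic_{add,sub}{,_return} functions into two templates, with the LL/SC asm and the interrupt-masking fallback parameterized by #op, c_op and asm_op. As an illustrative sketch (not part of the patch), the no-LLSC branch of the atomic_add() generated by ATOMIC_OPS(add, +=, addu) expands to:

	/* illustrative expansion of ATOMIC_OP(add, +=, addu), no-LLSC branch */
	static __inline__ void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* no ll/sc: serialize the RMW by masking IRQs */
		v->counter += i;		/* c_op substituted as "+=" */
		raw_local_irq_restore(flags);
	}

The _return variants keep the smp_mb__before_llsc()/smp_llsc_mb() bracketing, so the ordering semantics are unchanged by the macro conversion.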
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index d0352983b94d..63b3468ede4c 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -16,8 +16,8 @@
extern void octeon_cop2_save(struct octeon_cop2_state *);
extern void octeon_cop2_restore(struct octeon_cop2_state *);
-#define cop2_save(r) octeon_cop2_save(r)
-#define cop2_restore(r) octeon_cop2_restore(r)
+#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2)
#define cop2_present 1
#define cop2_lazy_restore 1
@@ -26,26 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *);
extern void nlm_cop2_save(struct nlm_cop2_state *);
extern void nlm_cop2_restore(struct nlm_cop2_state *);
-#define cop2_save(r) nlm_cop2_save(r)
-#define cop2_restore(r) nlm_cop2_restore(r)
+
+#define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2)
#define cop2_present 1
#define cop2_lazy_restore 0
#elif defined(CONFIG_CPU_LOONGSON3)
-#define cop2_save(r)
-#define cop2_restore(r)
-
#define cop2_present 1
#define cop2_lazy_restore 1
+#define cop2_save(r) do { (void)(r); } while (0)
+#define cop2_restore(r) do { (void)(r); } while (0)
#else
#define cop2_present 0
#define cop2_lazy_restore 0
-#define cop2_save(r)
-#define cop2_restore(r)
+#define cop2_save(r) do { (void)(r); } while (0)
+#define cop2_restore(r) do { (void)(r); } while (0)
#endif
enum cu2_ops {
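
The reworked macros take the task_struct pointer itself and dereference &(r)->thread.cp2 internally, and the empty variants become do { (void)(r); } while (0) so the argument is still evaluated and type-checked on platforms without COP2 state. A sketch of what a call site (see the switch_to.h hunks later in this diff) now reduces to:

	cop2_save(prev);	/* Octeon:  octeon_cop2_save(&prev->thread.cp2) */
				/* no COP2: do { (void)(prev); } while (0)      */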
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index e079598ae051..3325f3eb248c 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -231,6 +231,16 @@
#define cpu_has_clo_clz cpu_has_mips_r
#endif
+/*
+ * MIPS32 R2, MIPS64 R2, Loongson 3A and Octeon have WSBH.
+ * MIPS64 R2, Loongson 3A and Octeon have WSBH, DSBH and DSHD.
+ * This indicates the availability of WSBH and in case of 64 bit CPUs also
+ * DSBH and DSHD.
+ */
+#ifndef cpu_has_wsbh
+#define cpu_has_wsbh cpu_has_mips_r2
+#endif
+
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif
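
cpu_has_wsbh defaults to cpu_has_mips_r2 here and is pinned to a constant by the platform override headers further down (Octeon forces 1; Loongson keys it on CONFIG_CPU_LOONGSON3). A hedged sketch of a typical consumer, e.g. a JIT choosing which byte-swap sequence to emit (emit_wsbh()/emit_swap_fallback() are hypothetical helpers, not from this patch):

	/* hypothetical consumer: constant-folds to one branch on override platforms */
	if (cpu_has_wsbh)
		emit_wsbh(ctx, dst, src);		/* single WSBH instruction */
	else
		emit_swap_fallback(ctx, dst, src);	/* shift-and-mask sequence */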
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index d5f42c168001..a6c9ccb33c5c 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -79,6 +79,11 @@ struct cpuinfo_mips {
#define NUM_WATCH_REGS 4
u16 watch_reg_masks[NUM_WATCH_REGS];
unsigned int kscratch_mask; /* Usable KScratch mask. */
+ /*
+ * Cache Coherency attribute for write-combine memory writes.
+ * (shifted by _CACHE_SHIFT)
+ */
+ unsigned int writecombine;
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 0195745b4b1b..3ee347713307 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -33,17 +33,17 @@
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
- local_t emulated;
- local_t loads;
- local_t stores;
- local_t cp1ops;
- local_t cp1xops;
- local_t errors;
- local_t ieee754_inexact;
- local_t ieee754_underflow;
- local_t ieee754_overflow;
- local_t ieee754_zerodiv;
- local_t ieee754_invalidop;
+ unsigned long emulated;
+ unsigned long loads;
+ unsigned long stores;
+ unsigned long cp1ops;
+ unsigned long cp1xops;
+ unsigned long errors;
+ unsigned long ieee754_inexact;
+ unsigned long ieee754_underflow;
+ unsigned long ieee754_overflow;
+ unsigned long ieee754_zerodiv;
+ unsigned long ieee754_invalidop;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#define MIPS_FPU_EMU_INC_STATS(M) \
do { \
preempt_disable(); \
- __local_inc(&__get_cpu_var(fpuemustats).M); \
+ __this_cpu_inc(fpuemustats.M); \
preempt_enable(); \
} while (0)
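
With the counters demoted from local_t to plain unsigned long, __this_cpu_inc() replaces the __local_inc(&__get_cpu_var(...)) pairing; under the preempt_disable()/preempt_enable() bracket it is a race-free per-CPU increment. A sketch of how such per-CPU stats are typically summed for readout (assuming a debugfs-style reader):

	/* sketch: total one counter across all CPUs */
	unsigned long total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu(fpuemustats, cpu).emulated;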
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 992aaba603b5..b463f2aa5a61 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -24,7 +24,7 @@ do { \
asm volatile ( \
"1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
" li %[tmp_err], 0\n" \
- "2:\n" \
+ "2: .insn\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[tmp_err], 1\n" \
@@ -46,7 +46,7 @@ do { \
asm volatile ( \
"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
" li %[tmp_err], 0\n" \
- "2:\n" \
+ "2: .insn\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[tmp_err], 1\n" \
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index d9f932de80e9..1c967abd545c 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -8,19 +8,12 @@ extern void (*cpu_wait)(void);
extern void r4k_wait(void);
extern asmlinkage void __r4k_wait(void);
extern void r4k_wait_irqoff(void);
-extern void __pastwait(void);
static inline int using_rollback_handler(void)
{
return cpu_wait == r4k_wait;
}
-static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
-{
- return addr >= (unsigned long)r4k_wait_irqoff &&
- addr < (unsigned long)__pastwait;
-}
-
extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 7a3fc67bd7f9..f2c249796ea8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -96,11 +96,6 @@
#define CAUSEB_DC 27
#define CAUSEF_DC (_ULCAST_(1) << 27)
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-struct kvm_interrupt;
-
extern atomic_t kvm_mips_instance;
extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
@@ -767,5 +762,16 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
index 09f45e6afade..c5b6eef0efa7 100644
--- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
@@ -8,6 +8,12 @@
#define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H
#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
@@ -28,6 +34,8 @@
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_mmips 0
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 1
@@ -50,4 +58,8 @@
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_msa 0
+
#endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index cf8022872892..fa1f3cfbae8d 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -57,6 +57,7 @@
#define cpu_has_vint 0
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000
+#define cpu_has_wsbh 1
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
index 5d6a76434d00..c4a912733b65 100644
--- a/arch/mips/include/asm/mach-ip28/spaces.h
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -11,15 +11,8 @@
#ifndef _ASM_MACH_IP28_SPACES_H
#define _ASM_MACH_IP28_SPACES_H
-#define CAC_BASE _AC(0xa800000000000000, UL)
-
-#define HIGHMEM_START (~0UL)
-
#define PHYS_OFFSET _AC(0x20000000, UL)
-#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE UNCAC_BASE
-
#include <asm/mach-generic/spaces.h>
#endif /* _ASM_MACH_IP28_SPACES_H */
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index c0f3ef45c2c1..7d28f95b0512 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -59,4 +59,6 @@
#define cpu_has_watch 1
#define cpu_has_local_ebase 0
+#define cpu_has_wsbh IS_ENABLED(CONFIG_CPU_LOONGSON3)
+
#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5699ec3a71af..3be81803595d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -37,7 +37,7 @@
/*
* This is used for calculating the real page sizes
- * for FTLB or VTLB + FTLB confugrations.
+ * for FTLB or VTLB + FTLB configurations.
*/
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
@@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn)
#endif
-#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
+#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys((void *) \
+ (kaddr))))
extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index e592f3687d6f..e747bfa0be7e 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -224,38 +224,52 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define _CACHE_CACHABLE_NONCOHERENT 0
+#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED
#elif defined(CONFIG_CPU_SB1)
/* No penalty for being coherent on the SB1, so just
use it for "noncoherent" spaces, too. Shouldn't hurt. */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#elif defined(CONFIG_CPU_LOONGSON3)
/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* LOONGSON */
#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* LOONGSON */
-#else
+#elif defined(CONFIG_MACH_JZ4740)
+
+/* Ingenic uses the WA bit to achieve write-combine memory writes */
+#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)
-#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */
-#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */
+#endif
+#ifndef _CACHE_CACHABLE_NO_WA
+#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_WA
+#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_NONCOHERENT
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CE
+#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_COW
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CUW
+#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED_ACCELERATED
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif
#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index df49a308085c..d6d1928539b1 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -366,6 +366,16 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
return __pgprot(prot);
}
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
+ prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;
+
+ return __pgprot(prot);
+}
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
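
pgprot_writecombine() substitutes the boot-probed cacheability attribute from cpu_data[0].writecombine (see the cpu-info.h hunk above) into the protection bits. A minimal sketch of the usual consumer, a driver mmap handler ('pfn' and the function name are assumptions, not from this patch):

	/* hypothetical framebuffer mmap using the new helper */
	static int fb_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start, pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}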
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 05f08438a7c4..f1df4cb4a286 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
#endif
#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 1e0f20a9cdda..eacf865d21c2 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -37,11 +37,6 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
-#define topology_core_id(cpu) (cpu_data[cpu].core)
-#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
-
#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 3adac3b53d19..000000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SUSPEND_H
-#define __ASM_SUSPEND_H
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-#endif /* __ASM_SUSPEND_H */
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 495c1041a2cc..b928b6f898cd 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -92,7 +92,7 @@ do { \
KSTK_STATUS(prev) &= ~ST0_CU2; \
__c0_stat = read_c0_status(); \
write_c0_status(__c0_stat | ST0_CU2); \
- cop2_save(&prev->thread.cp2); \
+ cop2_save(prev); \
write_c0_status(__c0_stat & ~ST0_CU2); \
} \
__clear_software_ll_bit(); \
@@ -111,7 +111,7 @@ do { \
(KSTK_STATUS(current) & ST0_CU2)) { \
__c0_stat = read_c0_status(); \
write_c0_status(__c0_stat | ST0_CU2); \
- cop2_restore(&current->thread.cp2); \
+ cop2_restore(current); \
write_c0_status(__c0_stat & ~ST0_CU2); \
} \
if (cpu_has_dsp) \
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index cdf68b33bd65..bb7963753730 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -129,7 +129,7 @@ extern const unsigned long sysn32_call_table[];
static inline int syscall_get_arch(void)
{
- int arch = EM_MIPS;
+ int arch = AUDIT_ARCH_MIPS;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT_REGS)) {
arch |= __AUDIT_ARCH_64BIT;
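
AUDIT_ARCH_MIPS is EM_MIPS (8) expressed in the audit flag-bit convention, so returning it instead of the bare ELF machine number yields values that seccomp and audit filters can match directly. For reference, the flag bits from uapi/linux/audit.h that get ORed in:

	#define __AUDIT_ARCH_64BIT	0x80000000
	#define __AUDIT_ARCH_LE		0x40000000
	/* e.g. AUDIT_ARCH_MIPS64 == EM_MIPS | __AUDIT_ARCH_64BIT */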
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 20ea4859c822..3e307ec2afba 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -9,5 +9,13 @@
#define __ASM_TOPOLOGY_H
#include <topology.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
+#define topology_core_id(cpu) (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
+#endif
#endif /* __ASM_TOPOLOGY_H */
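
The four topology macros move here from smp.h (removed above) so that generic code pulling in <asm/topology.h> sees them, and they are now guarded by CONFIG_SMP. A sketch of a consumer (hypothetical; the real users are the generic topology sysfs code):

	pr_info("CPU%d: package %d, core %d\n", cpu,
		topology_physical_package_id(cpu), topology_core_id(cpu));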
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index b1e637757fe3..740219c2c894 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -81,6 +81,8 @@
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
+#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485)
+#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485)
#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
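
TIOCGRS485/TIOCSRS485 are the generic serial RS-485 ioctls, filling the 'T'-block slots 0x2E/0x2F that other architectures already use. A sketch of the usual userspace round-trip (fd is assumed to be an open tty descriptor):

	#include <sys/ioctl.h>
	#include <linux/serial.h>

	struct serial_rs485 rs485;

	if (ioctl(fd, TIOCGRS485, &rs485) == 0) {	/* read current state */
		rs485.flags |= SER_RS485_ENABLED;	/* enable driver RTS control */
		ioctl(fd, TIOCSRS485, &rs485);		/* write it back */
	}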
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index bbcfb8ba8106..91a3d197ede3 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -9,6 +9,8 @@
#ifndef _UAPI_ASM_PTRACE_H
#define _UAPI_ASM_PTRACE_H
+#include <linux/types.h>
+
/* 0 - 31 are integer registers, 32 - 63 are fp registers. */
#define FPR_BASE 32
#define PC 64
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index ac9a8f9cd1fb..8f2d184dbe9f 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -13,12 +13,16 @@
#define __SWAB_64_THRU_32__
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
+ defined(_MIPS_ARCH_LOONGSON3A)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
+ " .set push \n"
+ " .set arch=mips32r2 \n"
" wsbh %0, %1 \n"
+ " .set pop \n"
: "=r" (x)
: "r" (x));
@@ -29,8 +33,11 @@ static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
+ " .set push \n"
+ " .set arch=mips32r2 \n"
" wsbh %0, %1 \n"
" rotr %0, %0, 16 \n"
+ " .set pop \n"
: "=r" (x)
: "r" (x));
@@ -46,8 +53,11 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__asm__(
- " dsbh %0, %1\n"
- " dshd %0, %0"
+ " .set push \n"
+ " .set arch=mips64r2 \n"
+ " dsbh %0, %1 \n"
+ " dshd %0, %0 \n"
+ " .set pop \n"
: "=r" (x)
: "r" (x));
@@ -55,5 +65,5 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
}
#define __arch_swab64 __arch_swab64
#endif /* __mips64 */
-#endif /* MIPS R2 or newer */
+#endif /* MIPS R2 or newer or Loongson 3A */
#endif /* _ASM_SWAB_H */
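
The .set push / .set arch=... / .set pop brackets let the assembler accept wsbh, dsbh and dshd even when the translation unit's baseline ISA is older, which is what allows Loongson 3A builds to take this path. As a worked check of the 32-bit sequence, for x = 0x11223344:

	/* wsbh swaps the bytes inside each halfword:   0x11223344 -> 0x22114433
	 * rotr %0, %0, 16 then exchanges the halfwords: 0x22114433 -> 0x44332211
	 * which is exactly the full byte reversal __builtin_bswap32() gives. */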
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 9bc13eaf9d67..fdb4923777d1 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -373,16 +373,18 @@
#define __NR_sched_getattr (__NR_Linux + 350)
#define __NR_renameat2 (__NR_Linux + 351)
#define __NR_seccomp (__NR_Linux + 352)
+#define __NR_getrandom (__NR_Linux + 353)
+#define __NR_memfd_create (__NR_Linux + 354)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 352
+#define __NR_Linux_syscalls 354
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 352
+#define __NR_O32_Linux_syscalls 354
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -703,16 +705,18 @@
#define __NR_sched_getattr (__NR_Linux + 310)
#define __NR_renameat2 (__NR_Linux + 311)
#define __NR_seccomp (__NR_Linux + 312)
+#define __NR_getrandom (__NR_Linux + 313)
+#define __NR_memfd_create (__NR_Linux + 314)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 312
+#define __NR_Linux_syscalls 314
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 312
+#define __NR_64_Linux_syscalls 314
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1037,15 +1041,17 @@
#define __NR_sched_getattr (__NR_Linux + 314)
#define __NR_renameat2 (__NR_Linux + 315)
#define __NR_seccomp (__NR_Linux + 316)
+#define __NR_getrandom (__NR_Linux + 317)
+#define __NR_memfd_create (__NR_Linux + 318)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 316
+#define __NR_Linux_syscalls 318
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 316
+#define __NR_N32_Linux_syscalls 318
#endif /* _UAPI_ASM_UNISTD_H */
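
The three ABI tables advance in lock-step: getrandom lands at offsets 353/313/317 and memfd_create at 354/314/318 from the o32/64/n32 bases (4000/5000/6000). A hedged userspace sketch for an o32 process whose libc headers predate these numbers:

	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_getrandom
	#define __NR_getrandom (4000 + 353)	/* __NR_O32_Linux + 353, per the table above */
	#endif

	/* fill buf with len random bytes; returns count written or -1 */
	static long get_random_bytes_user(void *buf, size_t len)
	{
		return syscall(__NR_getrandom, buf, len, 0);
	}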