Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--  arch/tile/include/asm/atomic_32.h     10
-rw-r--r--  arch/tile/include/asm/atomic_64.h      6
-rw-r--r--  arch/tile/include/asm/barrier.h       14
-rw-r--r--  arch/tile/include/asm/bitops.h         1
-rw-r--r--  arch/tile/include/asm/bitops_32.h      8
-rw-r--r--  arch/tile/include/asm/bitops_64.h      4
-rw-r--r--  arch/tile/include/asm/irq.h            6
-rw-r--r--  arch/tile/include/asm/thread_info.h    3
-rw-r--r--  arch/tile/include/asm/topology.h      33
9 files changed, 23 insertions(+), 62 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1ad4a1f7d42b..1b109fad9fff 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -169,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_dec() do { } while (0)
-#define smp_mb__after_atomic_inc() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index ad220eed05fc..7b11c5fadd42 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-/* Atomic dec and inc don't implement barrier, so provide them if needed. */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG
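The two hunks above drop the operation-specific barrier macros (smp_mb__{before,after}_atomic_{dec,inc}()); callers are expected to use the single generic pair instead. A minimal caller-side sketch of that conversion, with a hypothetical counter name and assuming only <linux/atomic.h>:

#include <linux/atomic.h>

/*
 * Illustrative sketch, not part of this diff: converting a caller of the
 * removed macros to the generic pair.  atomic_dec() returns no value and
 * therefore implies no barrier of its own.
 */
static inline void sketch_publish_then_dec(atomic_t *pending)
{
	/* was: smp_mb__before_atomic_dec() */
	smp_mb__before_atomic();	/* order earlier stores before the dec */
	atomic_dec(pending);
	/* was: smp_mb__after_atomic_dec() */
	smp_mb__after_atomic();		/* order the dec before later accesses */
}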
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index b5a05d050a8f..96a42ae79f4d 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -72,6 +72,20 @@ mb_incoherent(void)
#define mb() fast_mb()
#define iob() fast_iob()
+#ifndef __tilegx__ /* 32 bit */
+/*
+ * We need to barrier before modifying the word, since the _atomic_xxx()
+ * routines just tns the lock and then read/modify/write of the word.
+ * But after the word is updated, the routine issues an "mf" before returning,
+ * and since it's a function call, we don't even need a compiler barrier.
+ */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() do { } while (0)
+#else /* 64 bit */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
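The barrier.h hunk keeps the old asymmetry: on 32-bit tile the lock-based _atomic_xxx() helpers already finish with an "mf", so smp_mb__after_atomic() compiles away, while 64-bit tile needs a full barrier on both sides. A hedged sketch of the kind of publish-then-signal pattern the trailing barrier exists for (hypothetical names, assuming <linux/atomic.h>):

#include <linux/atomic.h>

static atomic_t sketch_work_count;	/* hypothetical counter */

/*
 * Illustrative sketch, not from this patch: make an atomic update visible
 * before signalling another CPU.  On tilepro the smp_mb__after_atomic()
 * below is a no-op; on tilegx it emits a full "mf".
 */
static void sketch_post_work(void)
{
	atomic_inc(&sketch_work_count);
	smp_mb__after_atomic();		/* publish the increment first */
	/* ...then e.g. set a flag or wake a polling consumer. */
}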
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index d5a206865036..20caa346ac06 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -17,6 +17,7 @@
#define _ASM_TILE_BITOPS_H
#include <linux/types.h>
+#include <asm/barrier.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 386865ad2f55..bbf7b666f21d 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
* restricted to acting on a single-word quantity.
*
* clear_bit() may not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_clear_bit() and/or
- * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
+ * locking purposes, you should call smp_mb__before_atomic() and/or
+ * smp_mb__after_atomic() to ensure changes are visible on other cpus.
*/
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
@@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,
return (_atomic_xor(addr, mask) & mask) != 0;
}
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do {} while (0)
-
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* _ASM_TILE_BITOPS_32_H */
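As the updated clear_bit() comment says, the bit operation itself carries no barrier on tile, so a lock release built on it needs an explicit smp_mb__before_atomic(). A short sketch of that unlock pattern, using a hypothetical lock bit and relying on <asm/bitops.h> now pulling in <asm/barrier.h> (see the bitops.h hunk above):

#include <linux/bitops.h>

#define SKETCH_LOCK_BIT 0		/* hypothetical lock bit in a flags word */

/*
 * Illustrative sketch, not from this patch: releasing a bit-based lock.
 * Stores made inside the critical section must be ordered before the
 * releasing clear_bit(), which has no implied barrier here.
 */
static void sketch_bit_unlock(unsigned long *flags)
{
	smp_mb__before_atomic();	/* full mb() on both tilepro and tilegx */
	clear_bit(SKETCH_LOCK_BIT, flags);
}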
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index ad34cd056085..bb1a29221fcd 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
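On tilegx, clear_bit() above is a single atomic fetch-and-AND with the inverted mask, with the fetched value discarded. As an aside, and assuming the __insn_fetchand() intrinsic returns the previous word value, a value-returning variant can be sketched the same way (illustrative only, not part of this hunk):

/*
 * Illustrative sketch: test-and-clear built on the same intrinsic.
 * Value-returning bitops must be fully ordered, hence the leading barrier.
 */
static inline int sketch_test_and_clear_bit(unsigned nr,
					    volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));

	smp_mb();
	return (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
		& mask) != 0;
}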
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 33cff9a3058b..1fe86911838b 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -18,10 +18,12 @@
#include <linux/hardirq.h>
/* The hypervisor interface provides 32 IRQs. */
-#define NR_IRQS 32
+#define NR_IRQS 32
/* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 0
+#define IRQ_RESCHEDULE 0
+/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */
+#define NR_IRQS_LEGACY 1
#define irq_canonicalize(irq) (irq)
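With NR_IRQS_LEGACY set to 1, the core pre-allocates the descriptor for irq 0 at boot and dynamic allocation lands at 1 or above. A hypothetical driver-side sketch using the generic irq_alloc_descs() helper (the actual tile callers are outside this diff):

#include <linux/irq.h>
#include <asm/irq.h>

/*
 * Hypothetical sketch, not from this diff: grab one dynamically numbered
 * interrupt.  Searching from NR_IRQS_LEGACY leaves irq 0 (IRQ_RESCHEDULE,
 * allocated by the core) untouched; a negative return value is an errno.
 */
static int sketch_alloc_dynamic_irq(int node)
{
	return irq_alloc_descs(-1, NR_IRQS_LEGACY, 1, node);
}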
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 729aa107f64e..d767ff9f59b9 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -129,6 +129,7 @@ extern void _cpu_idle(void);
#define TIF_MEMDIE 7 /* OOM killer at work */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
+#define TIF_POLLING_NRFLAG 10 /* idle is polling for TIF_NEED_RESCHED */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -140,6 +141,7 @@ extern void _cpu_idle(void);
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
@@ -162,7 +164,6 @@ extern void _cpu_idle(void);
#ifdef __tilegx__
#define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
#endif
-#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
#ifndef __ASSEMBLY__
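TIF_POLLING_NRFLAG replaces the old TS_POLLING status bit: the idle loop sets it while it spins on TIF_NEED_RESCHED, so a remote CPU wanting a reschedule can often skip the IPI. A simplified, hypothetical sketch of that wakeup-side check (the real logic lives in the generic scheduler, not in this header):

#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Hypothetical sketch: if the target's idle loop advertises
 * TIF_POLLING_NRFLAG, setting TIF_NEED_RESCHED is enough and the
 * reschedule IPI can be skipped.
 */
static void sketch_kick_idle_cpu(struct task_struct *idle_task, int cpu)
{
	set_tsk_need_resched(idle_task);
	smp_mb();	/* pair with the polling loop's re-check of the flag */
	if (!test_tsk_thread_flag(idle_task, TIF_POLLING_NRFLAG))
		smp_send_reschedule(cpu);
}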
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index d15c0d8d550f..938311844233 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
/* For now, use numa node -1 for global allocation. */
#define pcibus_to_node(bus) ((void)(bus), -1)
-/*
- * TILE architecture has many cores integrated in one processor, so we need
- * setup bigger balance_interval for both CPU/NODE scheduling domains to
- * reduce process scheduling costs.
- */
-
-/* sched_domains SD_CPU_INIT for TILE architecture */
-#define SD_CPU_INIT (struct sched_domain) { \
- .min_interval = 4, \
- .max_interval = 128, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 0, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
- \
- .flags = 1*SD_LOAD_BALANCE \
- | 1*SD_BALANCE_NEWIDLE \
- | 1*SD_BALANCE_EXEC \
- | 1*SD_BALANCE_FORK \
- | 0*SD_BALANCE_WAKE \
- | 0*SD_WAKE_AFFINE \
- | 0*SD_SHARE_CPUPOWER \
- | 0*SD_SHARE_PKG_RESOURCES \
- | 0*SD_SERIALIZE \
- , \
- .last_balance = jiffies, \
- .balance_interval = 32, \
-}
-
/* By definition, we create nodes based on online memory. */
#define node_has_online_mem(nid) 1