Diffstat (limited to 'include/asm-xtensa')
-rw-r--r--  include/asm-xtensa/atomic.h       | 22
-rw-r--r--  include/asm-xtensa/bitops.h       |  2
-rw-r--r--  include/asm-xtensa/dma-mapping.h  |  2
-rw-r--r--  include/asm-xtensa/elf.h          |  2
-rw-r--r--  include/asm-xtensa/hardirq.h      |  1
-rw-r--r--  include/asm-xtensa/pgtable.h      |  3
-rw-r--r--  include/asm-xtensa/semaphore.h    | 51
-rw-r--r--  include/asm-xtensa/system.h       | 16
8 files changed, 40 insertions, 59 deletions
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 24f86f0e43cf..3670cc7695da 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t;
#include <asm/processor.h>
#include <asm/system.h>
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
/*
* This Xtensa implementation assumes that the right mechanism
@@ -223,6 +223,26 @@ static inline int atomic_sub_return(int i, atomic_t * v)
*/
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
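
The new atomic_add_unless() is the usual cmpxchg loop: re-read the counter until it either equals @u or the compare-and-swap succeeds, and report whether the add happened. Its main consumer is atomic_inc_not_zero(), used for "take a reference only if the object is still live" patterns. A minimal usage sketch, assuming a kernel build context; struct my_obj and my_obj_get() are hypothetical names, not part of this patch:

#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;	/* drops to zero when the last reference is put */
};

/* Take a reference only if the object is still live:
 * atomic_inc_not_zero() returns 0 when refcount was already 0. */
static int my_obj_get(struct my_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}
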
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index d395ef226c32..e76ee889e21d 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -174,7 +174,7 @@ static __inline__ int test_bit(int nr, const volatile void *addr)
return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
}
-#if XCHAL_HAVE_NSAU
+#if XCHAL_HAVE_NSA
static __inline__ int __cntlz (unsigned long x)
{
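
This guard fixes the configuration macro name: the Xtensa core headers define XCHAL_HAVE_NSA when the NSA/NSAU (normalization shift amount) instructions are present, so testing the misspelled XCHAL_HAVE_NSAU never selected the optimized __cntlz() path. Conceptually __cntlz() is count-leading-zeros; a compiler-generic equivalent, shown here only as a sketch and not the patch's inline-asm version:

/* Portable stand-in for __cntlz(): count the leading zero bits of a
 * 32-bit word.  The patched code uses the NSAU instruction when the
 * core defines XCHAL_HAVE_NSA; NSAU returns 32 for a zero operand,
 * which the guard below mirrors. */
static inline int cntlz32(unsigned int x)
{
	return x ? __builtin_clz(x) : 32;	/* __builtin_clz(0) is undefined */
}
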
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index e86a206f1209..c425f10d086a 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -28,7 +28,7 @@ extern void consistent_sync(void*, size_t, int);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
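
The prototype change only retypes the last argument: callers pass GFP_* allocation flags, and the kernel-wide prototype uses the gfp_t typedef so sparse can type-check them. A hedged caller sketch, assuming a kernel build context; my_alloc_dma_buf() is a hypothetical name:

#include <linux/dma-mapping.h>

/* Hypothetical caller: allocate a cache-coherent DMA buffer.
 * The last argument is now typed gfp_t, so the allocation flags
 * are type-checked by sparse. */
static void *my_alloc_dma_buf(struct device *dev, size_t size,
			      dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
}
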
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 64f1f53874fe..de0667453b2e 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -209,6 +209,8 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
+struct task_struct;
+
extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
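
Adding the struct task_struct; forward declaration lets the prototypes above take pointers to an incomplete type without pulling <linux/sched.h> into every user of <asm/elf.h>. The same idiom works for any header that only passes a pointer through; a generic, hypothetical illustration:

/* foo.h -- hypothetical header: only pointers to task_struct are passed
 * through, so a forward declaration suffices and <linux/sched.h> need
 * not be pulled into every includer. */
struct task_struct;			/* incomplete type is enough here */
extern void foo_inspect(struct task_struct *tsk);
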
diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h
index e07c76c36b95..aa9c1adf68d7 100644
--- a/include/asm-xtensa/hardirq.h
+++ b/include/asm-xtensa/hardirq.h
@@ -23,6 +23,7 @@ typedef struct {
unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
+void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#endif /* _XTENSA_HARDIRQ_H */
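
hardirq.h now carries a prototype for ack_bad_irq(), which the generic IRQ code calls when an interrupt arrives on a vector nobody claimed. What such an implementation typically does, as a sketch only and not the actual arch/xtensa code:

#include <linux/kernel.h>

/* Sketch only: report an interrupt that arrived on an unclaimed vector. */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
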
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 987e3b802313..7b15afb70c56 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -278,6 +278,8 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
#endif
}
+struct mm_struct;
+
static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
@@ -294,6 +296,7 @@ set_pmd(pmd_t *pmdp, pmd_t pmdval)
#endif
}
+struct vm_area_struct;
static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index db740b8bc6f0..f10c3487cd4c 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -20,28 +20,16 @@ struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
- long __magic;
-#endif
};
-#if WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
- , (int)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT(count), \
- 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
- __SEM_DEBUG_INIT(name) }
-
-#define __MUTEX_INITIALIZER(name) \
- __SEMAPHORE_INITIALIZER(name, 1)
+#define __SEMAPHORE_INITIALIZER(name,n) \
+{ \
+ .count = ATOMIC_INIT(n), \
+ .sleepers = 0, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
@@ -49,17 +37,9 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
atomic_set(&sem->count, val);
+ sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
-#if WAITQUEUE_DEBUG
- sem->__magic = (int)&sem->__magic;
-#endif
}
static inline void init_MUTEX (struct semaphore *sem)
@@ -81,9 +61,7 @@ extern spinlock_t semaphore_wake_lock;
static inline void down(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
+ might_sleep();
if (atomic_sub_return(1, &sem->count) < 0)
__down(sem);
@@ -92,9 +70,8 @@ static inline void down(struct semaphore * sem)
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
+
+ might_sleep();
if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_interruptible(sem);
@@ -104,9 +81,6 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_trylock(sem);
@@ -119,9 +93,6 @@ static inline int down_trylock(struct semaphore * sem)
*/
static inline void up(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_add_return(1, &sem->count) <= 0)
__up(sem);
}
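
Two things change in semaphore.h: the initializer switches to C99 designated initializers, so it no longer depends on field order or on the removed WAITQUEUE_DEBUG magic, and the sleeping paths gain might_sleep(), which warns at runtime if down() or down_interruptible() is called from atomic context. A hedged usage sketch, assuming a kernel build context; my_sem and my_critical_work() are hypothetical:

#include <asm/semaphore.h>

static DECLARE_MUTEX(my_sem);		/* counting semaphore, initialized to 1 */

static void my_critical_work(void)
{
	down(&my_sem);		/* may sleep; might_sleep() now warns if this
				 * runs in atomic context with debugging on */
	/* ... exclusive section ... */
	up(&my_sem);
}
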
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index f09393232e5e..9284867f1cb9 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#define tas(ptr) (xchg((ptr),1))
-#if ( __XCC__ == 1 )
-
-/* xt-xcc processes __inline__ differently than xt-gcc and decides to
- * insert an out-of-line copy of function __xchg. This presents the
- * unresolved symbol at link time of __xchg_called_with_bad_pointer,
- * even though such a function would never be called at run-time.
- * xt-gcc always inlines __xchg, and optimizes away the undefined
- * bad_pointer function.
- */
-
-#define xchg(ptr,x) xchg_u32(ptr,x)
-
-#else /* assume xt-gcc */
-
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
@@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
return x;
}
-#endif
-
extern void set_except_vector(int n, void *addr);
static inline void spill_registers(void)
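
The removed #if ( __XCC__ == 1 ) workaround existed because xt-xcc emitted an out-of-line copy of __xchg, leaving the deliberately undefined __xchg_called_with_bad_pointer symbol unresolved at link time even though it is never called. The underlying idiom is worth spelling out: the "bad pointer" helper is declared but never defined, and the compiler's dead-branch elimination removes the reference whenever the operand size is supported, so misuse surfaces as a link failure rather than silent breakage. A stand-alone illustration under those assumptions; my_xchg and my_xchg_u32 are hypothetical names, not the kernel's:

/* Declared but deliberately never defined: a reference to it survives
 * compilation and fails only at link time, turning "xchg() used on an
 * unsupported operand size" into a build error. */
extern unsigned long __xchg_called_with_bad_pointer(void);

static inline unsigned long my_xchg_u32(volatile int *m, unsigned long val)
{
	/* portable stand-in for the arch-specific atomic exchange */
	return (unsigned long)__atomic_exchange_n(m, (int)val, __ATOMIC_SEQ_CST);
}

#define my_xchg(ptr, x)							\
	((__typeof__(*(ptr)))(sizeof(*(ptr)) == 4			\
		? my_xchg_u32((volatile int *)(ptr), (unsigned long)(x)) \
		: __xchg_called_with_bad_pointer()))
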