#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
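
/*
 * Usage sketch (illustrative only, not part of this interface): a
 * counter is initialized either statically or with atomic_set(), and
 * read back with atomic_read(). "nusers" is a hypothetical example:
 *
 *	static atomic_t nusers = ATOMIC_INIT(0);
 *
 *	atomic_set(&nusers, 1);
 *	printk("%d users\n", atomic_read(&nusers));
 */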

#include <linux/compiler.h>
#include <asm/system.h>

/*
 * On SH-4A these are implemented with a movli.l/movco.l (LL/SC) retry
 * loop: movli.l begins the locked sequence, movco.l attempts the
 * conditional store and sets the T bit on success, and bf branches
 * back to retry when the store failed. Other CPUs fall back to
 * disabling interrupts, which is sufficient on UP.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
#endif
}

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
#endif
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(): the movli.l/movco.l instruction encoding requires r0
 * (hence the "z" constraint on the temporary), so the new value is
 * already sitting in a register and the return value comes out
 * without any extra work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}
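
/*
 * Example (illustrative): since atomic_add_return() hands back the
 * post-add value, it can be used to generate unique sequence numbers.
 * "next_ticket" and take_ticket() are hypothetical, not part of this
 * header:
 *
 *	static atomic_t next_ticket = ATOMIC_INIT(0);
 *
 *	static int take_ticket(void)
 *	{
 *		return atomic_add_return(1, &next_ticket);
 *	}
 */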

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
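
/*
 * Example (illustrative): atomic_dec_and_test() implements the usual
 * refcounting pattern, where whoever drops the last reference frees
 * the object. "struct foo" and release_foo() are hypothetical:
 *
 *	static void put_foo(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			release_foo(f);
 *	}
 */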

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
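
/*
 * Example (illustrative): atomic_cmpxchg() returns the value it
 * actually found, so the usual pattern is a read/compute/retry loop.
 * add_capped() below is a hypothetical bounded increment:
 *
 *	static int add_capped(atomic_t *v, int cap)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= cap)
 *				return 0;
 *		} while (atomic_cmpxchg(v, old, old + 1) != old);
 *
 *		return 1;
 *	}
 */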

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
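
/*
 * Example (illustrative): atomic_inc_not_zero() takes a reference only
 * if the object is still live (refcount != 0), the standard guard when
 * looking an object up in a shared table; a failed attempt means the
 * object is already on its way out. "obj" and its refcnt field are
 * hypothetical:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */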

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
#endif
}
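
/*
 * Example (illustrative): the mask operations keep a set of status
 * flags in a single word. The flag values below are hypothetical:
 *
 *	#define FOO_DIRTY	0x01
 *	#define FOO_LOCKED	0x02
 *
 *	static atomic_t foo_flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(FOO_DIRTY, &foo_flags);
 *	atomic_clear_mask(FOO_LOCKED, &foo_flags);
 */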

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */