Diffstat (limited to 'arch/hexagon/include/asm')

 arch/hexagon/include/asm/atomic.h  | 28 ++++++++++++++--------------
 arch/hexagon/include/asm/cmpxchg.h |  4 ++--
 arch/hexagon/include/asm/pgtable.h |  4 ----
 3 files changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 4ab895d7111f..6e94f8d04146 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
{
asm volatile(
"1: r6 = memw_locked(%0);\n"
@@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
- * atomic_read - reads a word, atomically
+ * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
- * atomic_xchg - atomic
+ * arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
- * atomic_cmpxchg - atomic compare-and-exchange values
+ * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
@@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* "old" is "expected" old val, __oldval is actual old value
*/
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
@@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int output; \
\
@@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
@@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
@@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
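
The rename above is mechanical, but it matters because the generic atomic layer takes ownership of the unprefixed names: it wraps each arch_ primitive with KASAN/KCSAN instrumentation and exposes the familiar atomic_*() API, so callers never change. A minimal two-layer sketch, assuming the ATOMIC_OP() expansion from this file and the general shape of the kernel's generated atomic-instrumented.h wrappers (both reconstructions, not verbatim quotes from the tree):

/* Layer 1 (sketch, not the file's exact asm): the arch primitive is a
 * load-locked/store-conditional retry loop. memw_locked takes a
 * reservation on the load; the conditional store succeeds (P3 true)
 * only if the reservation still holds. */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	int output;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"	/* load-locked */
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3) = %0;\n"	/* store-conditional */
		"	if (!P3) jump 1b;\n"		/* reservation lost: retry */
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
}

/* Layer 2 (sketch): the generic instrumented wrapper that now owns
 * the short name and calls down into the arch_ primitive. */
static __always_inline void atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

Because the instrumentation lives in one generated header rather than in every architecture, the per-arch code only has to supply the arch_-prefixed hooks, which is all this patch does.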
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 92b8a02e588a..cdb705e1496a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPUs and within interrupts on the same CPU.
*/
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting.
*/
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
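
For context, the __xchg() these macros forward to is built on the same load-locked/store-conditional idiom as the atomics above. A sketch of that pattern, under a hypothetical name since the hunk only shows the macro and the reconstruction is not quoted from the file body:

/* Hand-written sketch of the LL/SC exchange pattern; __xchg_sketch
 * is an illustrative name, not a symbol from this file. */
static inline unsigned long __xchg_sketch(unsigned long x,
					  volatile unsigned long *ptr)
{
	unsigned long retval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"	/* load old value, take reservation */
		"	memw_locked(%1,P0) = %2;\n"	/* store new value if still reserved */
		"	if (!P0) jump 1b;\n"		/* another writer intervened: retry */
		: "=&r" (retval)
		: "r" (ptr), "r" (x)
		: "memory", "p0"
	);
	return retval;
}

The arch_xchg()/arch_cmpxchg() macros add only type handling on top of this loop; the atomicity comes entirely from the reservation mechanism.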
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index dbb22b80b8c4..18cd6ea9ab23 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -155,9 +155,6 @@ extern unsigned long _dflt_cache_att;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-
/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -242,7 +239,6 @@ static inline int pmd_bad(pmd_t pmd)
* pmd_page - converts a PMD entry to a page pointer
*/
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/**
* pte_none - check if pte is mapped
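
The two pgtable.h deletions are possible because generic headers now supply equivalent defaults, so the per-arch copies are redundant. A sketch of those fallbacks, assuming the guarded form used by include/linux/pgtable.h (simplified; check the tree for the exact definitions):

/* Generic fallbacks (sketch): an architecture that does not define
 * these gets the defaults, which match what hexagon had. */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

#ifndef pmd_pgtable
#define pmd_pgtable(pmd)	pmd_page(pmd)
#endif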