Diffstat (limited to 'arch/sh/include')
-rw-r--r--  arch/sh/include/asm/mmu_context.h |   2
-rw-r--r--  arch/sh/include/asm/mutex-llsc.h  | 109
-rw-r--r--  arch/sh/include/asm/mutex.h       |  12
-rw-r--r--  arch/sh/include/asm/processor.h   |   1
-rw-r--r--  arch/sh/include/asm/tlb.h         |  15
5 files changed, 10 insertions, 129 deletions
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 9f417feaf6e8..35ffdd081d26 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -10,7 +10,7 @@
 #ifdef __KERNEL__
 #include <cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm-generic/mm_hooks.h>
 
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
deleted file mode 100644
index dad29b687bd3..000000000000
--- a/arch/sh/include/asm/mutex-llsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * arch/sh/include/asm/mutex-llsc.h
- *
- * SH-4A optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef __ASM_SH_MUTEX_LLSC_H
-#define __ASM_SH_MUTEX_LLSC_H
-
-/*
- * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
- * with a bastardized atomic decrement (it is not a reliable atomic decrement
- * but it satisfies the defined semantics for our purpose, while being
- * smaller and faster than a real atomic decrement or atomic swap.
- * The idea is to attempt decrementing the lock value only once. If once
- * decremented it isn't zero, or if its store-back fails due to a dispute
- * on the exclusive store, we simply bail out immediately through the slow
- * path where the lock will be reattempted until it succeeds.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-	int __done, __res;
-
-	__asm__ __volatile__ (
-		"movli.l	@%2, %0	\n"
-		"add		#-1, %0	\n"
-		"movco.l	%0, @%2	\n"
-		"movt		%1	\n"
-		: "=&z" (__res), "=&r" (__done)
-		: "r" (&(count)->counter)
-		: "t");
-
-	if (unlikely(!__done || __res != 0))
-		fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
-	int __done, __res;
-
-	__asm__ __volatile__ (
-		"movli.l	@%2, %0	\n"
-		"add		#-1, %0	\n"
-		"movco.l	%0, @%2	\n"
-		"movt		%1	\n"
-		: "=&z" (__res), "=&r" (__done)
-		: "r" (&(count)->counter)
-		: "t");
-
-	if (unlikely(!__done || __res != 0))
-		__res = -1;
-
-	return __res;
-}
-
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-	int __done, __res;
-
-	__asm__ __volatile__ (
-		"movli.l	@%2, %0	\n\t"
-		"add		#1, %0	\n\t"
-		"movco.l	%0, @%2	\n\t"
-		"movt		%1	\n\t"
-		: "=&z" (__res), "=&r" (__done)
-		: "r" (&(count)->counter)
-		: "t");
-
-	if (unlikely(!__done || __res <= 0))
-		fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock()	1
-
-/*
- * For __mutex_fastpath_trylock we do an atomic decrement and check the
- * result and put it in the __res variable.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-	int __res, __orig;
-
-	__asm__ __volatile__ (
-		"1: movli.l	@%2, %0	\n\t"
-		"dt		%0	\n\t"
-		"movco.l	%0,@%2	\n\t"
-		"bf		1b	\n\t"
-		"cmp/eq		#0,%0	\n\t"
-		"bt		2f	\n\t"
-		"mov		#0, %1	\n\t"
-		"bf		3f	\n\t"
-		"2: mov		#1, %1	\n\t"
-		"3:			"
-		: "=&z" (__orig), "=&r" (__res)
-		: "r" (&count->counter)
-		: "t");
-
-	return __res;
-}
-#endif /* __ASM_SH_MUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h
deleted file mode 100644
index d8e37716a4a0..000000000000
--- a/arch/sh/include/asm/mutex.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#if defined(CONFIG_CPU_SH4A)
-#include <asm/mutex-llsc.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index f9a09942a32d..5addd69f70ef 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
 
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
-#define cpu_relax_lowlatency()	cpu_relax()
 
 void default_idle(void);
 void stop_this_cpu(void *);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 025cdb1032f6..46e0d635e36f 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -65,6 +65,9 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
 		tlb->end = address + PAGE_SIZE;
 }
 
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,
@@ -115,18 +118,18 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
 	return tlb_remove_page(tlb, page);
 }
 
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+						     unsigned int page_size)
+{
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
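For reference, the mutex-llsc.h fastpath deleted above follows the classic counter-based mutex scheme (1 = unlocked, 0 = locked, negative = contended): attempt a single atomic decrement, and bail to the slow path on anything other than a clean 1 -> 0 transition. The sketch below restates that logic with portable C11 atomics instead of the SH-4A movli.l/movco.l load-locked/store-conditional pair; it is a minimal user-space illustration, not kernel code, and the demo_mutex_* names and the spinning slow path are hypothetical stand-ins for the real slowpath machinery.

/* Minimal sketch of the decrement-based mutex fastpath (assumed names). */
#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
	atomic_int count;	/* 1 = unlocked, 0 = locked, <0 = contended */
};

/* Crude stand-in for the contended path: spin until a 1 -> 0 exchange wins. */
static void demo_mutex_lock_slowpath(struct demo_mutex *m)
{
	while (atomic_exchange(&m->count, 0) != 1)
		;
}

static void demo_mutex_lock(struct demo_mutex *m)
{
	int old = atomic_fetch_sub(&m->count, 1);

	/* Fastpath succeeds only on an uncontended 1 -> 0 transition. */
	if (old != 1)
		demo_mutex_lock_slowpath(m);
}

static void demo_mutex_unlock(struct demo_mutex *m)
{
	/* Simplified: the real code also woke waiters when count was <= 0. */
	atomic_store(&m->count, 1);
}

int main(void)
{
	struct demo_mutex m = { .count = 1 };

	demo_mutex_lock(&m);
	printf("locked, count = %d\n", atomic_load(&m.count));
	demo_mutex_unlock(&m);
	printf("unlocked, count = %d\n", atomic_load(&m.count));
	return 0;
}

The removed header implemented exactly this split, but performed the "decrement once, check the result" step with a movli.l/movco.l pair so a lost reservation simply fell through to the generic slow path; this commit drops that per-arch fastpath along with asm/mutex.h, which selected between it and the asm-generic variants.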