From 66d99b883419b8df6d0a24ca957da7ab4831cf6e Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Wed, 10 Feb 2010 01:03:06 +0000
Subject: powerpc: Convert open coded native hashtable bit lock

Now that we have real bit locks, use them instead of open coding it.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/hash_native_64.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/mm/hash_native_64.c')

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a1b105..9e1aa4f99fac 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 	unsigned long *word = &hptep->v;
 
 	while (1) {
-		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
 		while(test_bit(HPTE_LOCK_BIT, word))
 			cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
-	asm volatile("lwsync":::"memory");
-	clear_bit(HPTE_LOCK_BIT, word);
+	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
--
cgit v1.2.3
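For context on the conversion above: test_and_set_bit_lock() and
clear_bit_unlock() are the generic bit-lock primitives from
<linux/bitops.h>, with acquire and release ordering respectively. That
release ordering is exactly what the open-coded lwsync barrier used to
provide, which is why the unlock path can drop it. Below is a minimal
sketch of the pattern, not part of the patch; the example_* names and
the lock bit number are hypothetical:

#include <linux/bitops.h>	/* test_and_set_bit_lock(), clear_bit_unlock() */
#include <asm/processor.h>	/* cpu_relax() */

#define EXAMPLE_LOCK_BIT	3	/* hypothetical software lock bit */

static inline void example_bit_lock(unsigned long *word)
{
	while (1) {
		/* acquire semantics: the critical section cannot be
		 * reordered before a successful lock acquisition */
		if (!test_and_set_bit_lock(EXAMPLE_LOCK_BIT, word))
			break;
		/* spin read-only until the bit looks free, then retry */
		while (test_bit(EXAMPLE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void example_bit_unlock(unsigned long *word)
{
	/* release semantics: stores in the critical section are
	 * ordered before the clear, so no explicit lwsync is needed */
	clear_bit_unlock(EXAMPLE_LOCK_BIT, word);
}

The read-only inner spin is the usual test-and-test-and-set shape: it
avoids hammering the cache line with atomic operations while the lock
is held.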
From 6b9c9b8a66f7696ce392d34f90d4d6aa7045f51d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 18 Feb 2010 02:22:35 +0000
Subject: powerpc: Convert native_tlbie_lock to raw_spinlock

native_tlbie_lock needs to be a real spinlock in RT. Convert it to
raw_spinlock.

Signed-off-by: Thomas Gleixner
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/hash_native_64.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/powerpc/mm/hash_native_64.c')

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 9e1aa4f99fac..784a400e0781 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
 
 #define HPTE_LOCK_BIT 3
 
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
-		spin_lock(&native_tlbie_lock);
+		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
-		spin_unlock(&native_tlbie_lock);
+		raw_spin_unlock(&native_tlbie_lock);
 }
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -433,7 +433,7 @@ static void native_hpte_clear(void)
 	/* we take the tlbie lock and hold it. Some hardware will
 	 * deadlock if we try to tlbie from two processors at once.
 	 */
-	spin_lock(&native_tlbie_lock);
+	raw_spin_lock(&native_tlbie_lock);
 
 	slots = pteg_count * HPTES_PER_GROUP;
 
@@ -457,7 +457,7 @@ static void native_hpte_clear(void)
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	spin_unlock(&native_tlbie_lock);
+	raw_spin_unlock(&native_tlbie_lock);
 
 	local_irq_restore(flags);
 }
@@ -520,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 
 		if (lock_tlbie)
-			spin_lock(&native_tlbie_lock);
+			raw_spin_lock(&native_tlbie_lock);
 
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -535,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 
 		if (lock_tlbie)
-			spin_unlock(&native_tlbie_lock);
+			raw_spin_unlock(&native_tlbie_lock);
 	}
 
 	local_irq_restore(flags);
--
cgit v1.2.3
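For context on why this conversion matters: under PREEMPT_RT, a plain
spinlock_t is substituted with a sleeping rtmutex-based lock, which
must not be taken in contexts that cannot sleep, such as the tlbie
sequences above that run with interrupts disabled. A raw_spinlock_t
always remains a true spinning lock, even on an RT kernel. A minimal
sketch of the resulting pattern; example_lock and example_hw_sequence
are hypothetical names, not part of the patch:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */

static void example_hw_sequence(void)		/* hypothetical helper */
{
	unsigned long flags;

	/* still disables preemption (and here IRQs) on PREEMPT_RT,
	 * where spin_lock_irqsave() on a spinlock_t could sleep */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... hardware sequence that must not be preempted ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The trade-off is that a raw spinlock adds latency on RT kernels while
held, so it is reserved for short, genuinely non-preemptible sections
like the tlbie broadcast here.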