commit 1a472c9dba6b9646fd36717968f6a531b4441c7d
tree   3cab56eaa3a25ff717b38f4a712d430b48a78fb3
parent 676012a66f651a98808459bc8ab75661828ed96f
Author:     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
AuthorDate: 2016-04-29 16:26:05 +0300
Commit:     Michael Ellerman <mpe@ellerman.id.au>
CommitDate: 2016-05-01 11:33:09 +0300
powerpc/mm/radix: Add tlbflush routines
The core kernel doesn't track the page size of the VA range we are invalidating, so for now we end up flushing the TLB for the entire mm here. Later patches will improve this. We also don't flush the page walk cache separately; instead we use RIC=2 when flushing the TLB, because we do an MMU gather flush after freeing page tables. MMU_NO_CONTEXT is updated for hash.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
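For context, the dispatch pattern this series introduces looks roughly like the sketch below. This is a minimal, illustrative reconstruction, not the verbatim code added by this commit; it assumes the radix_enabled() runtime check (visible in the diff below) and hash__*/radix__* naming for the two flush implementations. RIC is the Radix Invalidation Control field of the tlbie/tlbiel instructions: 0 invalidates TLB entries only, 1 invalidates only the page walk cache (PWC), and 2 invalidates both, which is why using RIC=2 makes a separate PWC flush unnecessary here.

/*
 * Minimal sketch, not the exact code from this commit: callers pick
 * the hash or radix flush routine at runtime.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		/*
		 * The core kernel doesn't pass the page size down, so
		 * the radix side flushes the whole mm for now; RIC=2
		 * also takes out the page walk cache.
		 */
		radix__local_flush_tlb_mm(mm);
	else
		hash__local_flush_tlb_mm(mm);
}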
Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/tlbflush-hash.h')

 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index cc092ea0387c..f12ddf5e8de5 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
 
-#define MMU_NO_CONTEXT 0
-
 /*
  * TLB flushing for 64-bit hash-MMU CPUs
  */
@@ -29,14 +27,21 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch;
 
+	if (radix_enabled())
+		return;
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch;
+
+	if (radix_enabled())
+		return;
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->index)
 		__flush_tlb_pending(batch);