From 70267a7f47a5565519da5ca2dde35d3ebf7d6ffb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 25 Jul 2012 21:19:50 +0000 Subject: powerpc/mm: Replace abs_to_virt() with __va() abs_to_virt() is just a wrapper around __va(), call __va() directly. Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 377e5cbedbbb..ba45739bdfe8 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -43,7 +43,6 @@ #include #include #include -#include <asm/abs_addr.h> #include #include #include @@ -651,7 +650,7 @@ static void __init htab_initialize(void) DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); - htab_address = abs_to_virt(table); + htab_address = __va(table); /* htab absolute addr + encoded htabsize */ _SDR1 = table + __ilog2(pteg_count) - 11; -- cgit v1.2.3 From d88dc13a2463cac2a909d6b8570b7d47f88f1b6e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 25 Jul 2012 21:19:58 +0000 Subject: powerpc/mm: Remove uses of abs_to_virt() and virt_to_abs() These days they are just __va() and __pa() respectively. Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/stab.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 9106ebb118f5..3f8efa6f2997 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -20,7 +20,6 @@ #include #include #include -#include <asm/abs_addr.h> struct stab_entry { unsigned long esid_data; @@ -257,7 +256,7 @@ void __init stabs_alloc(void) memset((void *)newstab, 0, HW_PAGE_SIZE); paca[cpu].stab_addr = newstab; - paca[cpu].stab_real = virt_to_abs(newstab); + paca[cpu].stab_real = __pa(newstab); printk(KERN_INFO "Segment table for CPU %d at 0x%llx " "virtual, 0x%llx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real); -- cgit v1.2.3 From beacc6da8649f5c0841ac9b326dcf0c4dad823cd Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 25 Jul 2012 21:20:03 +0000 Subject: powerpc: Remove all includes of <asm/abs_addr.h> It's empty now, apart from other includes. Fixup a few files that were getting things via this header.
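For context on why this series is safe: on 64-bit powerpc the kernel's linear mapping places all of RAM at a fixed virtual offset, so the physical/virtual conversion behind these helpers is pure arithmetic. A minimal sketch of that identity, assuming the conventional ppc64 linear-map base of 0xc000000000000000 (the example_* names are hypothetical; the real __va()/__pa() are macros in asm/page.h built on PAGE_OFFSET):

/* Illustrative only: what the linear-map conversion amounts to.
 * abs_to_virt()/virt_to_abs() added no semantics beyond this, which
 * is why they can be replaced by __va()/__pa() mechanically. */
#define EXAMPLE_PAGE_OFFSET	0xc000000000000000UL	/* assumed ppc64 base */

static inline void *example_va(unsigned long pa)	/* models __va() */
{
	return (void *)(pa + EXAMPLE_PAGE_OFFSET);
}

static inline unsigned long example_pa(void *va)	/* models __pa() */
{
	return (unsigned long)va - EXAMPLE_PAGE_OFFSET;
}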
Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/dma-swiotlb.c | 2 +- arch/powerpc/kernel/ibmebus.c | 1 - arch/powerpc/kernel/vio.c | 1 - arch/powerpc/mm/hash_native_64.c | 2 +- arch/powerpc/mm/init_64.c | 1 - arch/powerpc/mm/pgtable_64.c | 1 - arch/powerpc/platforms/pasemi/iommu.c | 1 + arch/powerpc/platforms/powernv/pci-ioda.c | 1 - arch/powerpc/platforms/powernv/pci-p5ioc2.c | 1 - arch/powerpc/platforms/powernv/pci.c | 1 - arch/powerpc/platforms/pseries/iommu.c | 1 + arch/powerpc/platforms/pseries/lpar.c | 1 - 12 files changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 46943651da23..a720b54b971c 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -20,7 +21,6 @@ #include #include #include -#include unsigned int ppc_swiotlb_enable; diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index b01d14eeca8d..8220baa46faf 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -47,7 +47,6 @@ #include #include #include -#include static struct device ibmebus_bus_device = { /* fake "parent" device */ .init_name = "ibmebus", diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 02b32216bbc3..201ba59738be 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 90039bc64119..f21e8ce8db33 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -14,10 +14,10 @@ #include #include +#include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 620b7acd2fdf..95a45293e5ac 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -62,7 +62,6 @@ #include #include #include -#include #include #include "mmu_decl.h" diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 249a0631c4db..297d49547ea8 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include "mmu_decl.h" diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index f03fbc2b0f59..7d2d036754b5 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -19,6 +19,7 @@ #undef DEBUG +#include #include #include #include diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9cda6a1ad0cf..5e75dcfe51b9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "powernv.h" #include "pci.h" diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 264967770c3a..6b4bef4e9d82 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "powernv.h" #include "pci.h" diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index be3cfc5ceabb..1e908aef4201 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ 
b/arch/powerpc/platforms/powernv/pci.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include "powernv.h" diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index e150093d417a..6153eea27ce7 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include /* for show_stack */ #include diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5f3ef876ded2..177055d0186b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 41ab5266c3622354353433618edb92ab278025fa Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Thu, 23 Aug 2012 21:27:09 +0000 Subject: powerpc: Add trap_nr to thread_struct Add thread_struct.trap_nr and use it to store the last exception the thread experienced. In this patch, we populate the field at various places where we force_sig_info() to the process. This is also used in uprobes to determine if the probed instruction caused an exception. Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/processor.h | 1 + arch/powerpc/kernel/process.c | 2 ++ arch/powerpc/kernel/traps.c | 1 + arch/powerpc/mm/fault.c | 1 + 4 files changed, 5 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 53b6dfa83344..e0f6710d1983 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -219,6 +219,7 @@ struct thread_struct { #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif unsigned long dabr; /* Data address breakpoint register */ + unsigned long trap_nr; /* last trap # on this thread */ #ifdef CONFIG_ALTIVEC /* Complete AltiVec register set */ vector128 vr[32] __attribute__((aligned(16))); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 710f400476de..8e701a4ed36b 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, { siginfo_t info; + current->thread.trap_nr = signal_code; if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 11, SIGSEGV) == NOTIFY_STOP) return; @@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address, { siginfo_t info; + current->thread.trap_nr = TRAP_HWBKPT; if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 11, SIGSEGV) == NOTIFY_STOP) return; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 158972341a2d..8831322962d3 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); + current->thread.trap_nr = code; memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 08ffcf52a856..995f924e007f 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address) up_read(¤t->mm->mmap_sem); if (user_mode(regs)) { + current->thread.trap_nr = BUS_ADRERR; info.si_signo = SIGBUS; 
info.si_errno = 0; info.si_code = BUS_ADRERR; -- cgit v1.2.3 From fecff0f7243edaea33071865ee5e35839be44f10 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Mon, 6 Aug 2012 03:27:05 +0000 Subject: powerpc/booke64: Add DO_KVM kernel hooks Hook DO_KVM macro into 64-bit booke for KVM integration. Extend interrupt handlers' parameter list with interrupt vector numbers to accomodate the macro. Only the bolted version of tlb miss handers is addressed now. Signed-off-by: Mihai Caraman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/exceptions-64e.S | 93 +++++++++++++++++++++++------------- arch/powerpc/mm/tlb_low_64e.S | 14 ++++-- 2 files changed, 70 insertions(+), 37 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 0f58a95fd216..3b1ad1ba8c48 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -25,6 +25,8 @@ #include #include #include +#include +#include /* XXX This will ultimately add space for a special exception save * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... @@ -35,12 +37,14 @@ #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE /* Exception prolog code for all exceptions */ -#define EXCEPTION_PROLOG(n, type, addition) \ +#define EXCEPTION_PROLOG(n, intnum, type, addition) \ mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ std r10,PACA_EX##type+EX_R10(r13); \ std r11,PACA_EX##type+EX_R11(r13); \ mfcr r10; /* save CR */ \ + mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ + DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ addition; /* additional code for that exc. */ \ std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ @@ -81,20 +85,20 @@ #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 -#define NORMAL_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) +#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) -#define CRIT_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) +#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n)) -#define DBG_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) +#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n)) -#define MC_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, MC, addition##_MC(n)) +#define MC_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n)) -#define GDBELL_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, GDBELL, addition##_GDBELL(n)) +#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) /* Variants of the "addition" argument for the prolog */ @@ -240,9 +244,9 @@ exc_##n##_bad_stack: \ 1: -#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ +#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \ START_EXCEPTION(label); \ - NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ + NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ ack(r8); \ CHECK_NAPPING(); \ @@ -293,7 +297,8 @@ interrupt_end_book3e: /* Critical Input Interrupt */ START_EXCEPTION(critical_input); - 
CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, + PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -304,7 +309,8 @@ interrupt_end_book3e: /* Machine Check Interrupt */ START_EXCEPTION(machine_check); - MC_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) + MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK, + PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) // bl special_reg_save_mc // addi r3,r1,STACK_FRAME_OVERHEAD @@ -315,7 +321,8 @@ interrupt_end_book3e: /* Data Storage Interrupt */ START_EXCEPTION(data_storage) - NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, + PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) @@ -323,18 +330,21 @@ interrupt_end_book3e: /* Instruction Storage Interrupt */ START_EXCEPTION(instruction_storage); - NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE, + PROLOG_ADDITION_2REGS) li r15,0 mr r14,r10 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) b storage_fault_common /* External Input Interrupt */ - MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) + MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, + external_input, .do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); - NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT, + PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) @@ -342,7 +352,8 @@ interrupt_end_book3e: /* Program Interrupt */ START_EXCEPTION(program); - NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) + NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM, + PROLOG_ADDITION_1REG) mfspr r14,SPRN_ESR EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) std r14,_DSISR(r1) @@ -354,7 +365,8 @@ interrupt_end_book3e: /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); - NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL, + PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... 
*/ EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) ld r12,_MSR(r1) @@ -369,14 +381,17 @@ interrupt_end_book3e: b .ret_from_except /* Decrementer Interrupt */ - MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) + MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, + decrementer, .timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ - MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) + MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, + fixed_interval, .unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); - CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, + PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -395,7 +410,8 @@ interrupt_end_book3e: /* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); - NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, + PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD @@ -404,7 +420,8 @@ interrupt_end_book3e: /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); - CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) + CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, + PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an @@ -469,7 +486,8 @@ kernel_dbg_exc: /* Debug exception as a debug interrupt*/ START_EXCEPTION(debug_debug); - DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) + DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, + PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an @@ -530,18 +548,21 @@ kernel_dbg_exc: b .ret_from_except START_EXCEPTION(perfmon); - NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, + PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) addi r3,r1,STACK_FRAME_OVERHEAD bl .performance_monitor_exception b .ret_from_except_lite /* Doorbell interrupt */ - MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) + MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, + doorbell, .doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); - CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, + PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -555,7 +576,8 @@ kernel_dbg_exc: * This general exception use GSRRx save/restore registers */ START_EXCEPTION(guest_doorbell); - GDBELL_EXCEPTION_PROLOG(0x2c0, PROLOG_ADDITION_NONE) + GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, + PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP) addi r3,r1,STACK_FRAME_OVERHEAD bl .save_nvgprs @@ -565,7 +587,8 @@ kernel_dbg_exc: /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); - CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, + PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -576,7 +599,8 @@ kernel_dbg_exc: /* Hypervisor call */ START_EXCEPTION(hypercall); - NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) + 
NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, + PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) addi r3,r1,STACK_FRAME_OVERHEAD bl .save_nvgprs @@ -586,7 +610,8 @@ kernel_dbg_exc: /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); - NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, + PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) addi r3,r1,STACK_FRAME_OVERHEAD bl .save_nvgprs diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index f09d48e3268d..d884fa44d34f 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -20,6 +20,8 @@ #include #include #include +#include +#include #ifdef CONFIG_PPC_64K_PAGES #define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) @@ -37,12 +39,18 @@ * * **********************************************************************/ -.macro tlb_prolog_bolted addr +.macro tlb_prolog_bolted intnum addr mtspr SPRN_SPRG_TLB_SCRATCH,r13 mfspr r13,SPRN_SPRG_PACA std r10,PACA_EXTLB+EX_TLB_R10(r13) mfcr r10 std r11,PACA_EXTLB+EX_TLB_R11(r13) +#ifdef CONFIG_KVM_BOOKE_HV +BEGIN_FTR_SECTION + mfspr r11, SPRN_SRR1 +END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) +#endif + DO_KVM \intnum, SPRN_SRR1 std r16,PACA_EXTLB+EX_TLB_R16(r13) mfspr r16,\addr /* get faulting address */ std r14,PACA_EXTLB+EX_TLB_R14(r13) @@ -66,7 +74,7 @@ /* Data TLB miss */ START_EXCEPTION(data_tlb_miss_bolted) - tlb_prolog_bolted SPRN_DEAR + tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ @@ -214,7 +222,7 @@ itlb_miss_fault_bolted: /* Instruction TLB miss */ START_EXCEPTION(instruction_tlb_miss_bolted) - tlb_prolog_bolted SPRN_SRR0 + tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 srdi r15,r16,60 /* get region */ -- cgit v1.2.3 From 8b64a9dfb091f1eca8b7e58da82f1e7d1d5fe0ad Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Mon, 6 Aug 2012 03:27:07 +0000 Subject: powerpc/booke64: Use SPRG0/3 scratch for bolted TLB miss & crit int Embedded.Hypervisor category defines GSPRG0..3 physical registers for guests. Avoid SPRG4-7 usage as scratch in host exception handlers, otherwise guest SPRG4-7 registers will be clobbered. For bolted TLB miss exception handlers, which is the version currently supported by KVM, use SPRN_SPRG_GEN_SCRATCH aka SPRG0 instead of SPRN_SPRG_TLB_SCRATCH aka SPRG6. Keep using TLB PACA slots to fit in one 64-byte cache line. For critical exception handlers use SPRG3 instead of SPRG7. Provide a routine to store and restore user-visible SPRGs. This will be subsequently used to restore VDSO information in SPRG3. Add EX_R13 to paca slots to free up SPRG3 and change the critical exception epilog to use it. 
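To picture the layout change, here is a rough C model of one PACA exception save area after this patch, matching the EX_* offsets in the exception-64e.h hunk below. The struct is purely illustrative; the kernel addresses these slots from assembly as PACA_EXCRIT + EX_xxx, and the EX_R1 slot is assumed from the prolog code since it is not part of the hunk:

/* Hypothetical C view of the save area; offsets mirror the header. */
struct ex_save_area {
	unsigned long r1;	/* EX_R1  (0 * 8), assumed, not in the hunk */
	unsigned long cr;	/* EX_CR  (1 * 8) */
	unsigned long r10;	/* EX_R10 (2 * 8) */
	unsigned long r11;	/* EX_R11 (3 * 8) */
	unsigned long r13;	/* EX_R13 (4 * 8), the new slot */
	unsigned long r14;	/* EX_R14 (5 * 8), was (4 * 8) */
	unsigned long r15;	/* EX_R15 (6 * 8), was (5 * 8) */
};

With r13 parked in the PACA by the prolog, the user-visible SPRG3 holds the interrupted r13 for only a handful of instructions and is free to carry its user contents (the VDSO data mentioned above) the rest of the time.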
Signed-off-by: Mihai Caraman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/exception-64e.h | 5 +++-- arch/powerpc/include/asm/reg.h | 5 +++-- arch/powerpc/kernel/exceptions-64e.S | 17 +++++++++++++++-- arch/powerpc/mm/tlb_low_64e.S | 4 ++-- 4 files changed, 23 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index e73452f09019..51fa43e536b9 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -46,8 +46,9 @@ #define EX_CR (1 * 8) #define EX_R10 (2 * 8) #define EX_R11 (3 * 8) -#define EX_R14 (4 * 8) -#define EX_R15 (5 * 8) +#define EX_R13 (4 * 8) +#define EX_R14 (5 * 8) +#define EX_R15 (6 * 8) /* * The TLB miss exception uses different slots. diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c195bf0e6241..a391244c18e1 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -761,7 +761,8 @@ * 64-bit embedded * - SPRG0 generic exception scratch * - SPRG2 TLB exception stack - * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) + * - SPRG3 critical exception scratch and + * CPU and NUMA node for VDSO getcpu (user visible) * - SPRG4 unused (user visible) * - SPRG6 TLB miss scratch (user visible, sorry !) * - SPRG7 critical exception scratch @@ -858,7 +859,7 @@ #ifdef CONFIG_PPC_BOOK3E_64 #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 -#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 +#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3 #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 83c20e84a4fb..7476b0ae09fb 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -42,6 +42,7 @@ mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ std r10,PACA_EX##type+EX_R10(r13); \ std r11,PACA_EX##type+EX_R11(r13); \ + PROLOG_STORE_RESTORE_SCRATCH_##type; \ mfcr r10; /* save CR */ \ mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ @@ -99,6 +100,18 @@ #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) +/* + * Store user-visible scratch in PACA exception slots and restore proper value + */ +#define PROLOG_STORE_RESTORE_SCRATCH_GEN +#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL +#define PROLOG_STORE_RESTORE_SCRATCH_DBG +#define PROLOG_STORE_RESTORE_SCRATCH_MC + +#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \ + mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \ + std r10,PACA_EXCRIT+EX_R13(r13) + /* Variants of the "addition" argument for the prolog */ #define PROLOG_ADDITION_NONE_GEN(n) @@ -454,7 +467,7 @@ interrupt_end_book3e: mtcr r10 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ ld r11,PACA_EXCRIT+EX_R11(r13) - mfspr r13,SPRN_SPRG_CRIT_SCRATCH + ld r13,PACA_EXCRIT+EX_R13(r13) rfci /* Normal debug exception */ @@ -467,7 +480,7 @@ interrupt_end_book3e: /* Now we mash up things to make it look like we are coming on a * normal exception */ - mfspr r15,SPRN_SPRG_CRIT_SCRATCH + ld r15,PACA_EXCRIT+EX_R13(r13) mtspr SPRN_SPRG_GEN_SCRATCH,r15 mfspr r14,SPRN_DBSR EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index d884fa44d34f..b4113bf86353 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ 
b/arch/powerpc/mm/tlb_low_64e.S @@ -40,7 +40,7 @@ **********************************************************************/ .macro tlb_prolog_bolted intnum addr - mtspr SPRN_SPRG_TLB_SCRATCH,r13 + mtspr SPRN_SPRG_GEN_SCRATCH,r13 mfspr r13,SPRN_SPRG_PACA std r10,PACA_EXTLB+EX_TLB_R10(r13) mfcr r10 @@ -69,7 +69,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ld r15,PACA_EXTLB+EX_TLB_R15(r13) TLB_MISS_RESTORE_STATS_BOLTED ld r16,PACA_EXTLB+EX_TLB_R16(r13) - mfspr r13,SPRN_SPRG_TLB_SCRATCH + mfspr r13,SPRN_SPRG_GEN_SCRATCH .endm /* Data TLB miss */ -- cgit v1.2.3 From a84fcd46870113e92523e1ebb9a0ec75f66e03a2 Mon Sep 17 00:00:00 2001 From: Suzuki Poulose Date: Tue, 21 Aug 2012 01:42:33 +0000 Subject: powerpc: Change memory_limit from phys_addr_t to unsigned long long There are some device-tree nodes, whose values are of type phys_addr_t. The phys_addr_t is variable sized based on the CONFIG_PHSY_T_64BIT. Change these to a fixed unsigned long long for consistency. This patch does the change only for memory_limit. The following is a list of such variables which need the change: 1) kernel_end, crashk_size - in arch/powerpc/kernel/machine_kexec.c 2) (struct resource *)crashk_res.start - We could export a local static variable from machine_kexec.c. Changing the above values might break the kexec-tools. So, I will fix kexec-tools first to handle the different sized values and then change the above. Suggested-by: Benjamin Herrenschmidt Signed-off-by: Suzuki K. Poulose Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/fadump.c | 3 +-- arch/powerpc/kernel/machine_kexec.c | 2 +- arch/powerpc/kernel/prom.c | 4 ++-- arch/powerpc/mm/mem.c | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d084ce195fc3..8b9a306260b2 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -9,7 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex); extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ extern int init_bootmem_done; /* set once bootmem is available */ -extern phys_addr_t memory_limit; +extern unsigned long long memory_limit; extern unsigned long klimit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 18bdf74fa164..06c8202a69cf 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void) else memory_limit = memblock_end_of_DRAM(); printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" - " dump, now %#016llx\n", - (unsigned long long)memory_limit); + " dump, now %#016llx\n", memory_limit); } if (memory_limit) memory_boundary = memory_limit; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 5df777794403..4074eff1e744 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -165,7 +165,7 @@ void __init reserve_crashkernel(void) if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", - (unsigned long long)memory_limit); + memory_limit); } printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index f191bf02943a..37725e86651e 100644 --- 
a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p) return 1; memory_limit = PAGE_ALIGN(memparse(p, &p)); - DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); + DBG("memory limit = 0x%llx\n", memory_limit); return 0; } @@ -661,7 +661,7 @@ void __init early_init_devtree(void *params) /* make sure we've parsed cmdline for mem= before this */ if (memory_limit) - first_memblock_size = min(first_memblock_size, memory_limit); + first_memblock_size = min_t(u64, first_memblock_size, memory_limit); setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index fbdad0e3929a..44cf2b20503d 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -62,7 +62,7 @@ int init_bootmem_done; int mem_init_done; -phys_addr_t memory_limit; +unsigned long long memory_limit; #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; -- cgit v1.2.3 From 6b5e7229bbd59f0cfce7015fd46736fc93d8c8c3 Mon Sep 17 00:00:00 2001 From: Joe MacDonald Date: Tue, 21 Aug 2012 08:22:28 +0000 Subject: powerpc/mm: Match variable types to API sys_subpage_prot() takes an unsigned long for 'addr' then does some stuff with it and the result is stored in a signed int, i, which is eventually used as the size parameter in a copy_from_user call. Update 'i' to be an unsigned long as well and since 'nw' is used in a size_t context which, depending on whether this is 32- or 64-bit may be unsigned int or unsigned long, switch that to a size_t and always be right. Finally, since we're in the neighbourhood, make the same changes to subpage_prot_clear(). Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Joe MacDonald Signed-off-by: Paul Gortmaker Acked-by: Paul Mackerras Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/subpage-prot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index e4f8f1fc81a5..7c415ddde948 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) struct mm_struct *mm = current->mm; struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; - int i, nw; + unsigned long i; + size_t nw; unsigned long next, limit; down_write(&mm->mmap_sem); @@ -144,7 +145,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) struct mm_struct *mm = current->mm; struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; - int i, nw; + unsigned long i; + size_t nw; unsigned long next, limit; int err; -- cgit v1.2.3 From 688ba1dbee8a49a2efe507cd9ae69634d92bb640 Mon Sep 17 00:00:00 2001 From: Jia Hongtao Date: Fri, 3 Aug 2012 18:14:10 +0800 Subject: powerpc/swiotlb: Enable at early stage and disable if not necessary Remove the dependency on PCI initialization for SWIOTLB initialization. So that PCI can be initialized at proper time. SWIOTLB is partly determined by PCI inbound/outbound map which is assigned in PCI initialization. But swiotlb_init() should be done at the stage of mem_init() which is much earlier than PCI initialization. So we reserve the memory for SWIOTLB first and free it if not necessary. All boards are converted to fit this change. 
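Pieced together from the hunks below, the new ordering looks like this; the wrapper function is hypothetical and exists only to condense three separate sites (mem_init(), board setup code, and a subsys_initcall) into one readable sketch. For example, a board with 8GB of RAM has memblock_end_of_DRAM() == 0x200000000, so the bounce pool is kept; a 2GB board frees it again in the last step:

/* Condensed, illustrative flow of this patch, not a real function. */
static void example_swiotlb_flow(void)
{
	swiotlb_init(0);		/* (1) mem_init(): always reserve early,
					 *     while low memory is available */

	if ((memblock_end_of_DRAM() - 1) > 0xffffffff)	/* (2) detect */
		ppc_swiotlb_enable = 1;	/* what swiotlb_detect_4g() does */

	if (ppc_swiotlb_enable) {	/* (3) subsys_initcall: commit */
		set_pci_dma_ops(&swiotlb_dma_ops);
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
	} else {
		swiotlb_free();		/* unused: give the memory back */
	}
}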
Signed-off-by: Jia Hongtao Signed-off-by: Li Yang Acked-by: Tony Breeds Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/swiotlb.h | 6 ++++++ arch/powerpc/kernel/dma-swiotlb.c | 20 ++++++++++++++++++++ arch/powerpc/mm/mem.c | 3 +-- arch/powerpc/platforms/44x/currituck.c | 10 ++-------- arch/powerpc/platforms/85xx/mpc85xx_ds.c | 1 + arch/powerpc/platforms/85xx/qemu_e500.c | 1 + arch/powerpc/sysdev/fsl_pci.c | 5 +---- 7 files changed, 32 insertions(+), 14 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index 8979d4cd3d70..de99d6e29430 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -22,4 +22,10 @@ int __init swiotlb_setup_bus_notifier(void); extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev); +#ifdef CONFIG_SWIOTLB +void swiotlb_detect_4g(void); +#else +static inline void swiotlb_detect_4g(void) {} +#endif + #endif /* __ASM_SWIOTLB_H */ diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index a720b54b971c..bd1a2aba599f 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void) &ppc_swiotlb_plat_bus_notifier); return 0; } + +void swiotlb_detect_4g(void) +{ + if ((memblock_end_of_DRAM() - 1) > 0xffffffff) + ppc_swiotlb_enable = 1; +} + +static int __init swiotlb_late_init(void) +{ + if (ppc_swiotlb_enable) { + swiotlb_print_info(); + set_pci_dma_ops(&swiotlb_dma_ops); + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; + } else { + swiotlb_free(); + } + + return 0; +} +subsys_initcall(swiotlb_late_init); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 44cf2b20503d..0dba5066c22a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -300,8 +300,7 @@ void __init mem_init(void) unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; #ifdef CONFIG_SWIOTLB - if (ppc_swiotlb_enable) - swiotlb_init(1); + swiotlb_init(0); #endif num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c index 9f6c33d63a42..6bd89a0e0dea 100644 --- a/arch/powerpc/platforms/44x/currituck.c +++ b/arch/powerpc/platforms/44x/currituck.c @@ -21,7 +21,6 @@ */ #include -#include #include #include #include @@ -159,13 +158,8 @@ static void __init ppc47x_setup_arch(void) /* No need to check the DMA config as we /know/ our windows are all of * RAM. 
Lets hope that doesn't change */ -#ifdef CONFIG_SWIOTLB - if ((memblock_end_of_DRAM() - 1) > 0xffffffff) { - ppc_swiotlb_enable = 1; - set_pci_dma_ops(&swiotlb_dma_ops); - ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; - } -#endif + swiotlb_detect_4g(); + ppc47x_smp_init(); } diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 6d3265fe7718..56f8c8f674df 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -159,6 +159,7 @@ static void __init mpc85xx_ds_setup_arch(void) if (ppc_md.progress) ppc_md.progress("mpc85xx_ds_setup_arch()", 0); + swiotlb_detect_4g(); mpc85xx_ds_pci_init(); mpc85xx_smp_init(); diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 95a2e53af71b..3c5490c8423f 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -42,6 +42,7 @@ static void __init qemu_e500_setup_arch(void) ppc_md.progress("qemu_e500_setup_arch()", 0); fsl_pci_init(); + swiotlb_detect_4g(); mpc85xx_smp_init(); } diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 6938792ccfd9..da7a3d7f54cc 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -872,11 +872,8 @@ void __devinit fsl_pci_init(void) * we need SWIOTLB to handle buffers located outside of * dma capable memory region */ - if (memblock_end_of_DRAM() - 1 > max) { + if (memblock_end_of_DRAM() - 1 > max) ppc_swiotlb_enable = 1; - set_pci_dma_ops(&swiotlb_dma_ops); - ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; - } #endif } #endif -- cgit v1.2.3 From f03839236392f5bc6107af5c9f749e8a48cd616c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:47 +0000 Subject: powerpc/mm: Replace open coded CONTEXT_BITS value To clarify the meaning for future readers, replace the open coded 19 with CONTEXT_BITS Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/mmu_context_hash64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 40677aa0190e..daa076c9025c 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -34,7 +34,7 @@ static DEFINE_IDA(mmu_context_ida); * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 
*/ -#define MAX_CONTEXT ((1UL << 19) - 1) +#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) int __init_new_context(void) { -- cgit v1.2.3 From dcda287a9b26309ae43a091d0ecde16f8f61b4c0 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:49 +0000 Subject: powerpc/mm: Simplify hpte_decode This patch simplifies hpte_decode for easy switching from virtual address to virtual page number in the later patch Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_native_64.c | 49 +++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 21 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index f21e8ce8db33..ebf685af4af2 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -351,9 +351,10 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, static void hpte_decode(struct hash_pte *hpte, unsigned long slot, int *psize, int *ssize, unsigned long *va) { + unsigned long avpn, pteg, vpi; unsigned long hpte_r = hpte->r; unsigned long hpte_v = hpte->v; - unsigned long avpn; + unsigned long vsid, seg_off; int i, size, shift, penc; if (!(hpte_v & HPTE_V_LARGE)) @@ -380,32 +381,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, } /* This works for all page sizes, and for 256M and 1T segments */ + *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; shift = mmu_psize_defs[size].shift; - avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23; - - if (shift < 23) { - unsigned long vpi, vsid, pteg; - pteg = slot / HPTES_PER_GROUP; - if (hpte_v & HPTE_V_SECONDARY) - pteg = ~pteg; - switch (hpte_v >> HPTE_V_SSIZE_SHIFT) { - case MMU_SEGSIZE_256M: - vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask; - break; - case MMU_SEGSIZE_1T: - vsid = avpn >> 40; + avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); + pteg = slot / HPTES_PER_GROUP; + if (hpte_v & HPTE_V_SECONDARY) + pteg = ~pteg; + + switch (*ssize) { + case MMU_SEGSIZE_256M: + /* We only have 28 - 23 bits of seg_off in avpn */ + seg_off = (avpn & 0x1f) << 23; + vsid = avpn >> 5; + /* We can find more bits from the pteg value */ + if (shift < 23) { + vpi = (vsid ^ pteg) & htab_hash_mask; + seg_off |= vpi << shift; + } + *va = vsid << SID_SHIFT | seg_off; + case MMU_SEGSIZE_1T: + /* We only have 40 - 23 bits of seg_off in avpn */ + seg_off = (avpn & 0x1ffff) << 23; + vsid = avpn >> 17; + if (shift < 23) { vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; - break; - default: - avpn = vpi = size = 0; + seg_off |= vpi << shift; } - avpn |= (vpi << mmu_psize_defs[size].shift); + *va = vsid << SID_SHIFT_1T | seg_off; + default: + *va = size = 0; } - - *va = avpn; *psize = size; - *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; } /* -- cgit v1.2.3 From 5524a27d39b68770f203d8d42eb5a95dde4933bc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:50 +0000 Subject: powerpc/mm: Convert virtual address to vpn This patch converts different functions to take a virtual page number instead of a virtual address. The virtual page number is the virtual address shifted right by VPN_SHIFT (12) bits. This enables us to have an address range of up to 76 bits.
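To make the arithmetic concrete before the diff: shifting the virtual address right by VPN_SHIFT discards only page-offset bits that the hash and HPTE encodings never need, and a 64-bit vpn then covers up to a 64 + 12 = 76-bit address range, slightly short of the architected 78 bits. A sketch mirroring the hpt_vpn() helper this patch introduces (example_vpn is a hypothetical name; the real helper and segment_shift() appear in the mmu-hash64.h hunk below):

#define VPN_SHIFT	12	/* as defined by this patch */

static inline unsigned long example_vpn(unsigned long vsid,
					unsigned long ea, int s_shift)
{
	/* s_shift is SID_SHIFT (28) for 256M segments,
	 * SID_SHIFT_1T (40) for 1T segments */
	unsigned long mask = (1ul << (s_shift - VPN_SHIFT)) - 1;

	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}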
Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/kvm_book3s.h | 2 +- arch/powerpc/include/asm/machdep.h | 6 +- arch/powerpc/include/asm/mmu-hash64.h | 78 ++++++++++++++----- arch/powerpc/include/asm/pte-hash64-64k.h | 18 +++-- arch/powerpc/include/asm/tlbflush.h | 4 +- arch/powerpc/kvm/book3s_32_mmu_host.c | 8 +- arch/powerpc/kvm/book3s_64_mmu_host.c | 17 +++-- arch/powerpc/kvm/trace.h | 14 ++-- arch/powerpc/mm/hash_low_64.S | 97 +++++++++++++++--------- arch/powerpc/mm/hash_native_64.c | 121 ++++++++++++++++++------------ arch/powerpc/mm/hash_utils_64.c | 30 ++++---- arch/powerpc/mm/hugetlbpage-hash64.c | 15 ++-- arch/powerpc/mm/tlb_hash64.c | 11 +-- arch/powerpc/platforms/cell/beat_htab.c | 45 +++++------ arch/powerpc/platforms/ps3/htab.c | 22 +++--- arch/powerpc/platforms/pseries/lpar.c | 76 ++++++++----------- 16 files changed, 324 insertions(+), 240 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index f0e0c6a66d97..7aefdb3e1ce4 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -59,7 +59,7 @@ struct hpte_cache { struct hlist_node list_vpte; struct hlist_node list_vpte_long; struct rcu_head rcu_head; - u64 host_va; + u64 host_vpn; u64 pfn; ulong slot; struct kvmppc_pte pte; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8111e1b78f7f..c4231973edd3 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -34,19 +34,19 @@ struct machdep_calls { char *name; #ifdef CONFIG_PPC64 void (*hpte_invalidate)(unsigned long slot, - unsigned long va, + unsigned long vpn, int psize, int ssize, int local); long (*hpte_updatepp)(unsigned long slot, unsigned long newpp, - unsigned long va, + unsigned long vpn, int psize, int ssize, int local); void (*hpte_updateboltedpp)(unsigned long newpp, unsigned long ea, int psize, int ssize); long (*hpte_insert)(unsigned long hpte_group, - unsigned long va, + unsigned long vpn, unsigned long prpn, unsigned long rflags, unsigned long vflags, diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 1c65a59881ea..6aeb4986a373 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -154,9 +154,25 @@ struct mmu_psize_def #define MMU_SEGSIZE_256M 0 #define MMU_SEGSIZE_1T 1 +/* + * encode page number shift. + * in order to fit the 78 bit va in a 64 bit variable we shift the va by + * 12 bits. This enable us to address upto 76 bit va. + * For hpt hash from a va we can ignore the page size bits of va and for + * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure + * we work in all cases including 4k page size. + */ +#define VPN_SHIFT 12 #ifndef __ASSEMBLY__ +static inline int segment_shift(int ssize) +{ + if (ssize == MMU_SEGSIZE_256M) + return SID_SHIFT; + return SID_SHIFT_1T; +} + /* * The current system page and segment sizes */ @@ -179,19 +195,40 @@ extern unsigned long tce_alloc_start, tce_alloc_end; */ extern int mmu_ci_restrictions; +/* + * This computes the AVPN and B fields of the first dword of a HPTE, + * for use when we want to match an existing PTE. The bottom 7 bits + * of the returned value are zero. 
+ */ +static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize, + int ssize) +{ + unsigned long v; + /* + * The AVA field omits the low-order 23 bits of the 78 bits VA. + * These bits are not needed in the PTE, because the + * low-order b of these bits are part of the byte offset + * into the virtual page and, if b < 23, the high-order + * 23-b of these bits are always used in selecting the + * PTEGs to be searched + */ + v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm); + v <<= HPTE_V_AVPN_SHIFT; + v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; + return v; +} + /* * This function sets the AVPN and L fields of the HPTE appropriately * for the page size */ -static inline unsigned long hpte_encode_v(unsigned long va, int psize, - int ssize) +static inline unsigned long hpte_encode_v(unsigned long vpn, + int psize, int ssize) { unsigned long v; - v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); - v <<= HPTE_V_AVPN_SHIFT; + v = hpte_encode_avpn(vpn, psize, ssize); if (psize != MMU_PAGE_4K) v |= HPTE_V_LARGE; - v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; return v; } @@ -216,30 +253,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize) } /* - * Build a VA given VSID, EA and segment size + * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size. */ -static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid, - int ssize) +static inline unsigned long hpt_vpn(unsigned long ea, + unsigned long vsid, int ssize) { - if (ssize == MMU_SEGSIZE_256M) - return (vsid << 28) | (ea & 0xfffffffUL); - return (vsid << 40) | (ea & 0xffffffffffUL); + unsigned long mask; + int s_shift = segment_shift(ssize); + + mask = (1ul << (s_shift - VPN_SHIFT)) - 1; + return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask); } /* * This hashes a virtual address */ - -static inline unsigned long hpt_hash(unsigned long va, unsigned int shift, - int ssize) +static inline unsigned long hpt_hash(unsigned long vpn, + unsigned int shift, int ssize) { + int mask; unsigned long hash, vsid; + /* VPN_SHIFT can be atmost 12 */ if (ssize == MMU_SEGSIZE_256M) { - hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift); + mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1; + hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^ + ((vpn & mask) >> (shift - VPN_SHIFT)); } else { - vsid = va >> 40; - hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift); + mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1; + vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT); + hash = vsid ^ (vsid << 25) ^ + ((vpn & mask) >> (shift - VPN_SHIFT)) ; } return hash & 0x7fffffffffUL; } diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 59247e816ac5..eedf427c9124 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -58,14 +58,16 @@ /* Trick: we set __end to va + 64k, which happens works for * a 16M page as well as we want only one iteration */ -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - unsigned long __end = va + PAGE_SIZE; \ - unsigned __split = (psize == MMU_PAGE_4K || \ - psize == MMU_PAGE_64K_AP); \ - shift = mmu_psize_defs[psize].shift; \ - for (index = 0; va < __end; index++, va += (1L << shift)) { \ - if (!__split || __rpte_sub_valid(rpte, index)) do { \ +#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ + do { \ + unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ + unsigned __split = (psize == MMU_PAGE_4K || \ + 
psize == MMU_PAGE_64K_AP); \ + shift = mmu_psize_defs[psize].shift; \ + for (index = 0; vpn < __end; index++, \ + vpn += (1L << (shift - VPN_SHIFT))) { \ + if (!__split || __rpte_sub_valid(rpte, index)) \ + do { #define pte_iterate_hashed_end() } while(0); } } while(0) diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 81143fcbd113..fc02d1dee955 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -95,7 +95,7 @@ struct ppc64_tlb_batch { unsigned long index; struct mm_struct *mm; real_pte_t pte[PPC64_TLB_BATCH_NR]; - unsigned long vaddr[PPC64_TLB_BATCH_NR]; + unsigned long vpn[PPC64_TLB_BATCH_NR]; unsigned int psize; int ssize; }; @@ -127,7 +127,7 @@ static inline void arch_leave_lazy_mmu_mode(void) #define arch_flush_lazy_mmu_mode() do {} while (0) -extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, +extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, int local); extern void flush_hash_range(unsigned long number, int local); diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 837f13e7b6bf..00aa61268e0d 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -141,7 +141,7 @@ extern char etext[]; int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) { pfn_t hpaddr; - u64 va; + u64 vpn; u64 vsid; struct kvmppc_sid_map *map; volatile u32 *pteg; @@ -173,7 +173,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) BUG_ON(!map); vsid = map->host_vsid; - va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); + vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT) next_pteg: if (rr == 16) { @@ -244,11 +244,11 @@ next_pteg: dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", orig_pte->may_write ? 'w' : '-', orig_pte->may_execute ? 
'x' : '-', - orig_pte->eaddr, (ulong)pteg, va, + orig_pte->eaddr, (ulong)pteg, vpn, orig_pte->vpage, hpaddr); pte->slot = (ulong)&pteg[rr]; - pte->host_va = va; + pte->host_vpn = vpn; pte->pte = *orig_pte; pte->pfn = hpaddr >> PAGE_SHIFT; diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 0688b6b39585..4d72f9ebc554 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -33,7 +33,7 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { - ppc_md.hpte_invalidate(pte->slot, pte->host_va, + ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, MMU_PAGE_4K, MMU_SEGSIZE_256M, false); } @@ -80,8 +80,9 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) { + unsigned long vpn; pfn_t hpaddr; - ulong hash, hpteg, va; + ulong hash, hpteg; u64 vsid; int ret; int rflags = 0x192; @@ -117,7 +118,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) } vsid = map->host_vsid; - va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); + vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); if (!orig_pte->may_write) rflags |= HPTE_R_PP; @@ -129,7 +130,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) else kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); - hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); + hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M); map_again: hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); @@ -141,7 +142,8 @@ map_again: goto out; } - ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); + ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, + MMU_PAGE_4K, MMU_SEGSIZE_256M); if (ret < 0) { /* If we couldn't map a primary PTE, try a secondary */ @@ -152,7 +154,8 @@ map_again: } else { struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); - trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); + trace_kvm_book3s_64_mmu_map(rflags, hpteg, + vpn, hpaddr, orig_pte); /* The ppc_md code may give us a secondary entry even though we asked for a primary. Fix up. */ @@ -162,7 +165,7 @@ map_again: } pte->slot = hpteg + (ret & 7); - pte->host_va = va; + pte->host_vpn = vpn; pte->pte = *orig_pte; pte->pfn = hpaddr >> PAGE_SHIFT; diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index 877186b7b1c3..ddb6a2149d44 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -189,7 +189,7 @@ TRACE_EVENT(kvm_book3s_mmu_map, TP_ARGS(pte), TP_STRUCT__entry( - __field( u64, host_va ) + __field( u64, host_vpn ) __field( u64, pfn ) __field( ulong, eaddr ) __field( u64, vpage ) @@ -198,7 +198,7 @@ TRACE_EVENT(kvm_book3s_mmu_map, ), TP_fast_assign( - __entry->host_va = pte->host_va; + __entry->host_vpn = pte->host_vpn; __entry->pfn = pte->pfn; __entry->eaddr = pte->pte.eaddr; __entry->vpage = pte->pte.vpage; @@ -208,8 +208,8 @@ TRACE_EVENT(kvm_book3s_mmu_map, (pte->pte.may_execute ? 
0x1 : 0); ), - TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", - __entry->host_va, __entry->pfn, __entry->eaddr, + TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", + __entry->host_vpn, __entry->pfn, __entry->eaddr, __entry->vpage, __entry->raddr, __entry->flags) ); @@ -218,7 +218,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate, TP_ARGS(pte), TP_STRUCT__entry( - __field( u64, host_va ) + __field( u64, host_vpn ) __field( u64, pfn ) __field( ulong, eaddr ) __field( u64, vpage ) @@ -227,7 +227,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate, ), TP_fast_assign( - __entry->host_va = pte->host_va; + __entry->host_vpn = pte->host_vpn; __entry->pfn = pte->pfn; __entry->eaddr = pte->pte.eaddr; __entry->vpage = pte->pte.vpage; @@ -238,7 +238,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate, ), TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", - __entry->host_va, __entry->pfn, __entry->eaddr, + __entry->host_vpn, __entry->pfn, __entry->eaddr, __entry->vpage, __entry->raddr, __entry->flags) ); diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 602aeb06d298..56585086413a 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K) /* Save non-volatile registers. * r31 will hold "old PTE" * r30 is "new PTE" - * r29 is "va" + * r29 is vpn * r28 is a hash value * r27 is hashtab mask (maybe dynamic patched instead ?) */ @@ -111,10 +111,10 @@ BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc va and put it in r29 */ - rldicr r29,r5,28,63-28 - rldicl r3,r3,0,36 - or r29,r3,r29 + /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT - VPN_SHIFT + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) + or r29,r28,r29 /* Calculate hash value for primary slot and store it in r28 */ rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ @@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) xor r28,r5,r0 b 4f -3: /* Calc VA and hash in r29 and r28 for 1T segment */ - sldi r29,r5,40 /* vsid << 40 */ - clrldi r3,r3,24 /* ea & 0xffffffffff */ +3: /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) + or r29,r28,r29 + + /* + * calculate hash value for primary slot and + * store it in r28 for 1T segment + */ rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ clrldi r5,r5,40 /* vsid & 0xffffff */ rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ xor r28,r28,r5 - or r29,r3,r29 /* VA */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -185,7 +190,7 @@ htab_insert_pte: /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1) /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -278,7 +283,7 @@ htab_modify_pte: add r3,r0,r3 /* add slot idx */ /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* va */ + mr r5,r29 /* vpn */ li r6,MMU_PAGE_4K /* page size */ ld r7,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R8)(r1) /* get "local" param */ @@ -339,7 +344,7 @@ 
_GLOBAL(__hash_page_4K) /* Save non-volatile registers. * r31 will hold "old PTE" * r30 is "new PTE" - * r29 is "va" + * r29 is vpn * r28 is a hash value * r27 is hashtab mask (maybe dynamic patched instead ?) * r26 is the hidx mask @@ -394,10 +399,14 @@ BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc va and put it in r29 */ - rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ - rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ - or r29,r3,r29 /* r29 = va */ + /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT - VPN_SHIFT + /* + * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff + * srdi r28,r3,VPN_SHIFT + */ + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) + or r29,r28,r29 /* Calculate hash value for primary slot and store it in r28 */ rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ @@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) xor r28,r5,r0 b 4f -3: /* Calc VA and hash in r29 and r28 for 1T segment */ - sldi r29,r5,40 /* vsid << 40 */ - clrldi r3,r3,24 /* ea & 0xffffffffff */ +3: /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT + /* + * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff + * srdi r28,r3,VPN_SHIFT + */ + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) + or r29,r28,r29 + + /* + * Calculate hash value for primary slot and + * store it in r28 for 1T segment + */ rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ clrldi r5,r5,40 /* vsid & 0xffffff */ rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ xor r28,r28,r5 - or r29,r3,r29 /* VA */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -488,7 +506,7 @@ htab_special_pfn: /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1) /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove) * useless now that the segment has been switched to 4k pages. */ htab_inval_old_hpte: - mr r3,r29 /* virtual addr */ + mr r3,r29 /* vpn */ mr r4,r31 /* PTE.pte */ li r5,0 /* PTE.hidx */ li r6,MMU_PAGE_64K /* psize */ @@ -620,7 +638,7 @@ htab_modify_pte: add r3,r0,r3 /* add slot idx */ /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* va */ + mr r5,r29 /* vpn */ li r6,MMU_PAGE_4K /* page size */ ld r7,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R8)(r1) /* get "local" param */ @@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K) /* Save non-volatile registers. * r31 will hold "old PTE" * r30 is "new PTE" - * r29 is "va" + * r29 is vpn * r28 is a hash value * r27 is hashtab mask (maybe dynamic patched instead ?) 
*/ @@ -729,10 +747,10 @@ BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc va and put it in r29 */ - rldicr r29,r5,28,63-28 - rldicl r3,r3,0,36 - or r29,r3,r29 + /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT - VPN_SHIFT + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) + or r29,r28,r29 /* Calculate hash value for primary slot and store it in r28 */ rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ @@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) xor r28,r5,r0 b 4f -3: /* Calc VA and hash in r29 and r28 for 1T segment */ - sldi r29,r5,40 /* vsid << 40 */ - clrldi r3,r3,24 /* ea & 0xffffffffff */ +3: /* Calc vpn and put it in r29 */ + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) + or r29,r28,r29 + + /* + * calculate hash value for primary slot and + * store it in r28 for 1T segment + */ rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ clrldi r5,r5,40 /* vsid & 0xffffff */ rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ xor r28,r28,r5 - or r29,r3,r29 /* VA */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -806,7 +829,7 @@ ht64_insert_pte: /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1) /* Call ppc_md.hpte_insert */ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve va */ + mr r4,r29 /* Retrieve vpn */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARAM(R9)(r1) /* segment size */ @@ -899,7 +922,7 @@ ht64_modify_pte: add r3,r0,r3 /* add slot idx */ /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* va */ + mr r5,r29 /* vpn */ li r6,MMU_PAGE_64K ld r7,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R8)(r1) /* get "local" param */ diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index ebf685af4af2..a4a1c728f269 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -39,22 +39,35 @@ DEFINE_RAW_SPINLOCK(native_tlbie_lock); -static inline void __tlbie(unsigned long va, int psize, int ssize) +static inline void __tlbie(unsigned long vpn, int psize, int ssize) { + unsigned long va; unsigned int penc; - /* clear top 16 bits, non SLS segment */ + /* + * We need 14 to 65 bits of va for a tlbie of a 4K page. + * With vpn we ignore the lower VPN_SHIFT bits already. + * And the top two bits are already ignored because we can + * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT + * of 12. + */ + va = vpn << VPN_SHIFT; + /* + * clear top 16 bits of 64-bit va, non SLS segment + * Older versions of the architecture (2.02 and earlier) require the + * masking of the top 16 bits.
+ */ va &= ~(0xffffULL << 48); switch (psize) { case MMU_PAGE_4K: - va &= ~0xffful; va |= ssize << 8; asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) : "memory"); break; default: + /* We need 14 to 14 + i bits of va */ penc = mmu_psize_defs[psize].penc; va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); va |= penc << 12; @@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize) } } -static inline void __tlbiel(unsigned long va, int psize, int ssize) +static inline void __tlbiel(unsigned long vpn, int psize, int ssize) { + unsigned long va; unsigned int penc; - /* clear top 16 bits, non SLS segment */ + /* VPN_SHIFT can be at most 12 */ + va = vpn << VPN_SHIFT; + /* + * clear top 16 bits of 64-bit va, non SLS segment + * Older versions of the architecture (2.02 and earlier) require the + * masking of the top 16 bits. + */ va &= ~(0xffffULL << 48); switch (psize) { case MMU_PAGE_4K: - va &= ~0xffful; va |= ssize << 8; asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" : : "r"(va) : "memory"); break; default: + /* We need 14 to 14 + i bits of va */ penc = mmu_psize_defs[psize].penc; va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); va |= penc << 12; @@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize) } -static inline void tlbie(unsigned long va, int psize, int ssize, int local) +static inline void tlbie(unsigned long vpn, int psize, int ssize, int local) { unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); @@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local) raw_spin_lock(&native_tlbie_lock); asm volatile("ptesync": : :"memory"); if (use_local) { - __tlbiel(va, psize, ssize); + __tlbiel(vpn, psize, ssize); asm volatile("ptesync": : :"memory"); } else { - __tlbie(va, psize, ssize); + __tlbie(vpn, psize, ssize); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } if (lock_tlbie && !use_local) @@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep) clear_bit_unlock(HPTE_LOCK_BIT, word); } -static long native_hpte_insert(unsigned long hpte_group, unsigned long va, +static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) { @@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va, int i; if (!(vflags & HPTE_V_BOLTED)) { - DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," + DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," " rflags=%lx, vflags=%lx, psize=%d)\n", - hpte_group, va, pa, rflags, vflags, psize); + hpte_group, vpn, pa, rflags, vflags, psize); } for (i = 0; i < HPTES_PER_GROUP; i++) { @@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va, if (i == HPTES_PER_GROUP) return -1; - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize) | rflags; if (!(vflags & HPTE_V_BOLTED)) { @@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group) } static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, int psize, int ssize, + unsigned long vpn, int psize, int ssize, int local) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v, want_v; int ret = 0; - want_v =
hpte_encode_v(va, psize, ssize); + want_v = hpte_encode_v(vpn, psize, ssize); - DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", - va, want_v & HPTE_V_AVPN, slot, newpp); + DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", + vpn, want_v & HPTE_V_AVPN, slot, newpp); native_lock_hpte(hptep); @@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, native_unlock_hpte(hptep); /* Ensure it is out of the tlb too. */ - tlbie(va, psize, ssize, local); + tlbie(vpn, psize, ssize, local); return ret; } -static long native_hpte_find(unsigned long va, int psize, int ssize) +static long native_hpte_find(unsigned long vpn, int psize, int ssize) { struct hash_pte *hptep; unsigned long hash; @@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize) long slot; unsigned long want_v, hpte_v; - hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); - want_v = hpte_encode_v(va, psize, ssize); + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); + want_v = hpte_encode_v(vpn, psize, ssize); /* Bolted mappings are only ever in the primary group */ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; @@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize) static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - unsigned long vsid, va; + unsigned long vpn; + unsigned long vsid; long slot; struct hash_pte *hptep; vsid = get_kernel_vsid(ea, ssize); - va = hpt_va(ea, vsid, ssize); + vpn = hpt_vpn(ea, vsid, ssize); - slot = native_hpte_find(va, psize, ssize); + slot = native_hpte_find(vpn, psize, ssize); if (slot == -1) panic("could not find page to bolt\n"); hptep = htab_address + slot; @@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, (newpp & (HPTE_R_PP | HPTE_R_N)); /* Ensure it is out of the tlb too. 
*/ - tlbie(va, psize, ssize, 0); + tlbie(vpn, psize, ssize, 0); } -static void native_hpte_invalidate(unsigned long slot, unsigned long va, +static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, int ssize, int local) { struct hash_pte *hptep = htab_address + slot; @@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, local_irq_save(flags); - DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); + DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); - want_v = hpte_encode_v(va, psize, ssize); + want_v = hpte_encode_v(vpn, psize, ssize); native_lock_hpte(hptep); hpte_v = hptep->v; @@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, hptep->v = 0; /* Invalidate the TLB */ - tlbie(va, psize, ssize, local); + tlbie(vpn, psize, ssize, local); local_irq_restore(flags); } @@ -349,7 +370,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, #define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) static void hpte_decode(struct hash_pte *hpte, unsigned long slot, - int *psize, int *ssize, unsigned long *va) + int *psize, int *ssize, unsigned long *vpn) { unsigned long avpn, pteg, vpi; unsigned long hpte_r = hpte->r; @@ -399,7 +420,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, vpi = (vsid ^ pteg) & htab_hash_mask; seg_off |= vpi << shift; } - *va = vsid << SID_SHIFT | seg_off; + *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; case MMU_SEGSIZE_1T: /* We only have 40 - 23 bits of seg_off in avpn */ seg_off = (avpn & 0x1ffff) << 23; @@ -408,9 +429,9 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; seg_off |= vpi << shift; } - *va = vsid << SID_SHIFT_1T | seg_off; + *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; default: - *va = size = 0; + *vpn = size = 0; } *psize = size; } @@ -425,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, */ static void native_hpte_clear(void) { + unsigned long vpn = 0; unsigned long slot, slots, flags; struct hash_pte *hptep = htab_address; - unsigned long hpte_v, va; + unsigned long hpte_v; unsigned long pteg_count; int psize, ssize; @@ -455,9 +477,9 @@ static void native_hpte_clear(void) * already hold the native_tlbie_lock. 
*/ if (hpte_v & HPTE_V_VALID) { - hpte_decode(hptep, slot, &psize, &ssize, &va); + hpte_decode(hptep, slot, &psize, &ssize, &vpn); hptep->v = 0; - __tlbie(va, psize, ssize); + __tlbie(vpn, psize, ssize); } } @@ -472,7 +494,8 @@ static void native_hpte_clear(void) */ static void native_flush_hash_range(unsigned long number, int local) { - unsigned long va, hash, index, hidx, shift, slot; + unsigned long vpn; + unsigned long hash, index, hidx, shift, slot; struct hash_pte *hptep; unsigned long hpte_v; unsigned long want_v; @@ -486,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local) local_irq_save(flags); for (i = 0; i < number; i++) { - va = batch->vaddr[i]; + vpn = batch->vpn[i]; pte = batch->pte[i]; - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { - hash = hpt_hash(va, shift, ssize); + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { + hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; hptep = htab_address + slot; - want_v = hpte_encode_v(va, psize, ssize); + want_v = hpte_encode_v(vpn, psize, ssize); native_lock_hpte(hptep); hpte_v = hptep->v; if (!HPTE_V_COMPARE(hpte_v, want_v) || @@ -512,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local) mmu_psize_defs[psize].tlbiel && local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { - va = batch->vaddr[i]; + vpn = batch->vpn[i]; pte = batch->pte[i]; - pte_iterate_hashed_subpages(pte, psize, va, index, - shift) { - __tlbiel(va, psize, ssize); + pte_iterate_hashed_subpages(pte, psize, + vpn, index, shift) { + __tlbiel(vpn, psize, ssize); } pte_iterate_hashed_end(); } asm volatile("ptesync":::"memory"); @@ -529,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local) asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { - va = batch->vaddr[i]; + vpn = batch->vpn[i]; pte = batch->pte[i]; - pte_iterate_hashed_subpages(pte, psize, va, index, - shift) { - __tlbie(va, psize, ssize); + pte_iterate_hashed_subpages(pte, psize, + vpn, index, shift) { + __tlbie(vpn, psize, ssize); } pte_iterate_hashed_end(); } asm volatile("eieio; tlbsync; ptesync":::"memory"); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index ba45739bdfe8..7d4ffd79cc82 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -191,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, vaddr += step, paddr += step) { unsigned long hash, hpteg; unsigned long vsid = get_kernel_vsid(vaddr, ssize); - unsigned long va = hpt_va(vaddr, vsid, ssize); + unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; - hash = hpt_hash(va, shift, ssize); + hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); BUG_ON(!ppc_md.hpte_insert); - ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, + ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, HPTE_V_BOLTED, psize, ssize); if (ret < 0) @@ -1152,21 +1152,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* WARNING: This is called from hash_low_64.S, if you change this prototype, * do not forget to update the assembly call site ! 
*/ -void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, +void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, int local) { unsigned long hash, index, shift, hidx, slot; - DBG_LOW("flush_hash_page(va=%016lx)\n", va); - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { - hash = hpt_hash(va, shift, ssize); + DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { + hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); - ppc_md.hpte_invalidate(slot, va, psize, ssize, local); + ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local); } pte_iterate_hashed_end(); } @@ -1180,7 +1180,7 @@ void flush_hash_range(unsigned long number, int local) &__get_cpu_var(ppc64_tlb_batch); for (i = 0; i < number; i++) - flush_hash_page(batch->vaddr[i], batch->pte[i], + flush_hash_page(batch->vpn[i], batch->pte[i], batch->psize, batch->ssize, local); } } @@ -1207,14 +1207,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) { unsigned long hash, hpteg; unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); - unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); + unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); int ret; - hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); - ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), + ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); BUG_ON (ret < 0); @@ -1228,9 +1228,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) { unsigned long hash, hidx, slot; unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); - unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); + unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); - hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); spin_lock(&linear_map_hash_lock); BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); hidx = linear_map_hash_slots[lmi] & 0x7f; @@ -1240,7 +1240,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; - ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); + ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0); } void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index cc5c273086cf..cecad348f604 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, int local, int ssize, unsigned int shift, unsigned int mmu_psize) { + unsigned long vpn; unsigned long old_pte, new_pte; - unsigned long va, rflags, pa, sz; + unsigned long rflags, pa, sz; long slot; BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); /* Search the Linux page table for a match with va */ - va = hpt_va(ea, vsid, ssize); + vpn = hpt_vpn(ea, vsid, 
ssize); /* At this point, we have a pte (old_pte) which can be used to build * or update an HPTE. There are 2 cases: @@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, /* There MIGHT be an HPTE for this pte */ unsigned long hash, slot; - hash = hpt_hash(va, shift, ssize); + hash = hpt_hash(vpn, shift, ssize); if (old_pte & _PAGE_F_SECOND) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += (old_pte & _PAGE_F_GIX) >> 12; - if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, + if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, ssize, local) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } if (likely(!(old_pte & _PAGE_HASHPTE))) { - unsigned long hash = hpt_hash(va, shift, ssize); + unsigned long hash = hpt_hash(vpn, shift, ssize); unsigned long hpte_group; pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; @@ -101,14 +102,14 @@ repeat: _PAGE_COHERENT | _PAGE_GUARDED)); /* Insert into the hash table, primary slot */ - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, mmu_psize, ssize); /* Primary is full, try the secondary */ if (unlikely(slot == -1)) { hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, HPTE_V_SECONDARY, mmu_psize, ssize); if (slot == -1) { diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 31f18207970b..ae758b3ff72c 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge) { + unsigned long vpn; struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); - unsigned long vsid, vaddr; + unsigned long vsid; unsigned int psize; int ssize; real_pte_t rpte; @@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } - vaddr = hpt_va(addr, vsid, ssize); + vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); /* @@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, * and decide to use local invalidates instead... 
*/ if (!batch->active) { - flush_hash_page(vaddr, rpte, psize, ssize, 0); + flush_hash_page(vpn, rpte, psize, ssize, 0); put_cpu_var(ppc64_tlb_batch); return; } @@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, batch->ssize = ssize; } batch->pte[i] = rpte; - batch->vaddr[i] = vaddr; + batch->vpn[i] = vpn; batch->index = ++i; if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); @@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) if (cpumask_equal(mm_cpumask(batch->mm), tmp)) local = 1; if (i == 1) - flush_hash_page(batch->vaddr[0], batch->pte[0], + flush_hash_page(batch->vpn[0], batch->pte[0], batch->psize, batch->ssize, local); else flush_hash_range(i, local); diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index b83077e13161..0f6f83988b3d 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group) } static long beat_lpar_hpte_insert(unsigned long hpte_group, - unsigned long va, unsigned long pa, + unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) { @@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group, "rflags=%lx, vflags=%lx, psize=%d)\n", hpte_group, va, pa, rflags, vflags, psize); - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | + hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize) | rflags; @@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void) */ static long beat_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, + unsigned long vpn, int psize, int ssize, int local) { unsigned long lpar_rc; u64 dummy0, dummy1; unsigned long want_v; - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); DBG_LOW(" update: " "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... 
", @@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot, return 0; } -static long beat_lpar_hpte_find(unsigned long va, int psize) +static long beat_lpar_hpte_find(unsigned long vpn, int psize) { unsigned long hash; unsigned long i, j; long slot; unsigned long want_v, hpte_v; - hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); for (j = 0; j < 2; j++) { slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; @@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - unsigned long lpar_rc, slot, vsid, va; + unsigned long vpn; + unsigned long lpar_rc, slot, vsid; u64 dummy0, dummy1; vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); - va = hpt_va(ea, vsid, MMU_SEGSIZE_256M); + vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M); raw_spin_lock(&beat_htab_lock); - slot = beat_lpar_hpte_find(va, psize); + slot = beat_lpar_hpte_find(vpn, psize); BUG_ON(slot == -1); lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, @@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, BUG_ON(lpar_rc != 0); } -static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, +static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, int ssize, int local) { unsigned long want_v; @@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", slot, va, psize, local); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); raw_spin_lock_irqsave(&beat_htab_lock, flags); dummy1 = beat_lpar_hpte_getword0(slot); @@ -311,7 +312,7 @@ void __init hpte_init_beat(void) } static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, - unsigned long va, unsigned long pa, + unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) { @@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, return -1; if (!(vflags & HPTE_V_BOLTED)) - DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " + DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, " "rflags=%lx, vflags=%lx, psize=%d)\n", - hpte_group, va, pa, rflags, vflags, psize); + hpte_group, vpn, pa, rflags, vflags, psize); - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | + hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize) | rflags; @@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, */ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, unsigned long newpp, - unsigned long va, + unsigned long vpn, int psize, int ssize, int local) { unsigned long lpar_rc; unsigned long want_v; unsigned long pss; - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); pss = (psize == MMU_PAGE_4K) ? 
-1UL : mmu_psize_defs[psize].penc; DBG_LOW(" update: " @@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, return 0; } -static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, +static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn, int psize, int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; unsigned long pss; - DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", - slot, va, psize, local); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", + slot, vpn, psize, local); + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index 3124cf791ebb..d00d7b0a3bda 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -43,7 +43,7 @@ enum ps3_lpar_vas_id { static DEFINE_SPINLOCK(ps3_htab_lock); -static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, +static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) { @@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, */ vflags &= ~HPTE_V_SECONDARY; - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; spin_lock_irqsave(&ps3_htab_lock, flags); @@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, if (result) { /* all entries bolted !*/ - pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", - __func__, result, va, pa, hpte_group, hpte_v, hpte_r); + pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n", + __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r); BUG(); } @@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group) } static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, int psize, int ssize, int local) + unsigned long vpn, int psize, int ssize, int local) { int result; u64 hpte_v, want_v, hpte_rs; @@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long flags; long ret; - want_v = hpte_encode_v(va, psize, ssize); + want_v = hpte_encode_v(vpn, psize, ssize); spin_lock_irqsave(&ps3_htab_lock, flags); @@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, &hpte_rs); if (result) { - pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", - __func__, result, va, slot, psize); + pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n", + __func__, result, vpn, slot, psize); BUG(); } @@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, panic("ps3_hpte_updateboltedpp() not implemented"); } -static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, +static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, int ssize, int local) { unsigned long flags; @@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); if (result) { - pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", - __func__, result, 
va, slot, psize); + pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n", + __func__, result, vpn, slot, psize); BUG(); } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 177055d0186b..0da39fed355a 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -107,9 +107,9 @@ void vpa_init(int cpu) } static long pSeries_lpar_hpte_insert(unsigned long hpte_group, - unsigned long va, unsigned long pa, - unsigned long rflags, unsigned long vflags, - int psize, int ssize) + unsigned long vpn, unsigned long pa, + unsigned long rflags, unsigned long vflags, + int psize, int ssize) { unsigned long lpar_rc; unsigned long flags; @@ -117,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long hpte_v, hpte_r; if (!(vflags & HPTE_V_BOLTED)) - pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " - "rflags=%lx, vflags=%lx, psize=%d)\n", - hpte_group, va, pa, rflags, vflags, psize); + pr_devel("hpte_insert(group=%lx, vpn=%016lx, " + "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", + hpte_group, vpn, pa, rflags, vflags, psize); - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize) | rflags; if (!(vflags & HPTE_V_BOLTED)) @@ -225,22 +225,6 @@ static void pSeries_lpar_hptab_clear(void) } } -/* - * This computes the AVPN and B fields of the first dword of a HPTE, - * for use when we want to match an existing PTE. The bottom 7 bits - * of the returned value are zero. - */ -static inline unsigned long hpte_encode_avpn(unsigned long va, int psize, - int ssize) -{ - unsigned long v; - - v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); - v <<= HPTE_V_AVPN_SHIFT; - v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; - return v; -} - /* * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and * the low 3 bits of flags happen to line up. So no transform is needed. 
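The hpte_encode_avpn() helper removed above is presumably re-added in a shared header in terms of vpn (that change is outside the files shown in this excerpt), since updatepp and flush paths below keep calling it with a vpn argument. Because va = vpn << VPN_SHIFT, the old shift of va by 23 becomes a shift of vpn by 23 - VPN_SHIFT. A sketch of what the vpn-based encoding looks like, assuming the same mmu_psize_defs[].avpnm masks and HPTE_V_* shift constants used elsewhere in this series:

/*
 * Sketch only -- vpn-based counterpart of the va-based helper removed
 * above.  Since va = vpn << VPN_SHIFT,
 * (va >> 23) == (vpn >> (23 - VPN_SHIFT)).
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;

	v = vpn >> (23 - VPN_SHIFT);		/* AVPN omits the low 23 VA bits */
	v &= ~(mmu_psize_defs[psize].avpnm);	/* clear per-page-size low bits */
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}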
@@ -249,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize, */ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, + unsigned long vpn, int psize, int ssize, int local) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; unsigned long want_v; - want_v = hpte_encode_avpn(va, psize, ssize); + want_v = hpte_encode_avpn(vpn, psize, ssize); pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", want_v, slot, flags, psize); @@ -294,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) return dword0; } -static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) +static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) { unsigned long hash; unsigned long i; long slot; unsigned long want_v, hpte_v; - hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); - want_v = hpte_encode_avpn(va, psize, ssize); + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); + want_v = hpte_encode_avpn(vpn, psize, ssize); /* Bolted entries are always in the primary group */ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; @@ -322,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - unsigned long lpar_rc, slot, vsid, va, flags; + unsigned long vpn; + unsigned long lpar_rc, slot, vsid, flags; vsid = get_kernel_vsid(ea, ssize); - va = hpt_va(ea, vsid, ssize); + vpn = hpt_vpn(ea, vsid, ssize); - slot = pSeries_lpar_hpte_find(va, psize, ssize); + slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); flags = newpp & 7; @@ -336,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, BUG_ON(lpar_rc != H_SUCCESS); } -static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, +static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; unsigned long dummy1, dummy2; - pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", - slot, va, psize, local); + pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", + slot, vpn, psize, local); - want_v = hpte_encode_avpn(va, psize, ssize); + want_v = hpte_encode_avpn(vpn, psize, ssize); lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); if (lpar_rc == H_NOT_FOUND) return; @@ -357,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, static void pSeries_lpar_hpte_removebolted(unsigned long ea, int psize, int ssize) { - unsigned long slot, vsid, va; + unsigned long vpn; + unsigned long slot, vsid; vsid = get_kernel_vsid(ea, ssize); - va = hpt_va(ea, vsid, ssize); + vpn = hpt_vpn(ea, vsid, ssize); - slot = pSeries_lpar_hpte_find(va, psize, ssize); + slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); - pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); + pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0); } /* Flag bits for H_BULK_REMOVE */ @@ -381,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea, */ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) { + unsigned long vpn; unsigned long i, pix, rc; unsigned long flags = 0; struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[9]; - unsigned long va; unsigned long hash, index, shift, hidx, slot; 
real_pte_t pte; int psize, ssize; @@ -398,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) ssize = batch->ssize; pix = 0; for (i = 0; i < number; i++) { - va = batch->vaddr[i]; + vpn = batch->vpn[i]; pte = batch->pte[i]; - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { - hash = hpt_hash(va, shift, ssize); + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { + hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { - pSeries_lpar_hpte_invalidate(slot, va, psize, + pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, local); } else { param[pix] = HBR_REQUEST | HBR_AVPN | slot; - param[pix+1] = hpte_encode_avpn(va, psize, + param[pix+1] = hpte_encode_avpn(vpn, psize, ssize); pix += 2; if (pix == 8) { -- cgit v1.2.3 From 7aa0727f3302931e698b3a7979ae5b9a4600da4e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:52 +0000 Subject: powerpc/mm: Increase the slice range to 64TB This patch makes the high slice psize mask an unsigned char array so that we can support more than 16TB. Currently we support up to 64TB. Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu-hash64.h | 6 +- arch/powerpc/include/asm/page_64.h | 6 +- arch/powerpc/mm/hash_utils_64.c | 15 +++-- arch/powerpc/mm/slb_low.S | 30 +++++++--- arch/powerpc/mm/slice.c | 107 +++++++++++++++++++++------------- 5 files changed, 109 insertions(+), 55 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 6aeb4986a373..7cbd541f915f 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -460,7 +460,11 @@ typedef struct { #ifdef CONFIG_PPC_MM_SLICES u64 low_slices_psize; /* SLB page size encodings */ - u64 high_slices_psize; /* 4 bits per slice for now */ + /* + * Right now we support 64TB; with 4 bits for each + * 1TB slice we need 32 bytes for 64TB. + */ + unsigned char high_slices_psize[32]; /* 4 bits per slice for now */ #else u16 sllp; /* SLB page size encoding */ #endif diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index fed85e6290e1..6c9bef4cb6a0 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -82,7 +82,11 @@ extern u64 ppc64_pft_size; struct slice_mask { u16 low_slices; - u16 high_slices; + /* + * This should be derived out of PGTABLE_RANGE. For the current + * max 64TB, u64 should be ok.
+ */ + u64 high_slices; }; struct mm_struct; diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7d4ffd79cc82..3a292be2e079 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -803,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) #ifdef CONFIG_PPC_MM_SLICES unsigned int get_paca_psize(unsigned long addr) { - unsigned long index, slices; + u64 lpsizes; + unsigned char *hpsizes; + unsigned long index, mask_index; if (addr < SLICE_LOW_TOP) { - slices = get_paca()->context.low_slices_psize; + lpsizes = get_paca()->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); - } else { - slices = get_paca()->context.high_slices_psize; - index = GET_HIGH_SLICE_INDEX(addr); + return (lpsizes >> (index * 4)) & 0xF; } - return (slices >> (index * 4)) & 0xF; + hpsizes = get_paca()->context.high_slices_psize; + index = GET_HIGH_SLICE_INDEX(addr); + mask_index = index & 0x1; + return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; } #else diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index b9ee79ce2200..e132dc6ed1a9 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -108,17 +108,31 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) * between 4k and 64k standard page size */ #ifdef CONFIG_PPC_MM_SLICES + /* r10 have esid */ cmpldi r10,16 - - /* Get the slice index * 4 in r11 and matching slice size mask in r9 */ - ld r9,PACALOWSLICESPSIZE(r13) - sldi r11,r10,2 + /* below SLICE_LOW_TOP */ blt 5f - ld r9,PACAHIGHSLICEPSIZE(r13) - srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2) - andi. r11,r11,0x3c + /* + * Handle hpsizes, + * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index + */ + srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */ + addi r9,r11,PACAHIGHSLICEPSIZE + lbzx r9,r13,r9 /* r9 is hpsizes[r11] */ + /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */ + rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63 + b 6f -5: /* Extract the psize and multiply to get an array offset */ +5: + /* + * Handle lpsizes + * r9 is get_paca()->context.low_slices_psize, r11 is index + */ + ld r9,PACALOWSLICESPSIZE(r13) + mr r11,r10 +6: + sldi r11,r11,2 /* index * 4 */ + /* Extract the psize and multiply to get an array offset */ srd r9,r9,r11 andi. r9,r9,0xf mulli r9,r9,MMUPSIZEDEFSIZE diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 73709f7ce92c..b4e996a398bd 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -42,7 +42,7 @@ int _slice_debug = 1; static void slice_print_mask(const char *label, struct slice_mask mask) { - char *p, buf[16 + 3 + 16 + 1]; + char *p, buf[16 + 3 + 64 + 1]; int i; if (!_slice_debug) @@ -54,7 +54,7 @@ static void slice_print_mask(const char *label, struct slice_mask mask) *(p++) = '-'; *(p++) = ' '; for (i = 0; i < SLICE_NUM_HIGH; i++) - *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; + *(p++) = (mask.high_slices & (1ul << i)) ? 
'1' : '0'; *(p++) = 0; printk(KERN_DEBUG "%s:%s\n", label, buf); @@ -84,8 +84,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start, } if ((start + len) > SLICE_LOW_TOP) - ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) - - (1u << GET_HIGH_SLICE_INDEX(start)); + ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1)) + - (1ul << GET_HIGH_SLICE_INDEX(start)); return ret; } @@ -135,26 +135,31 @@ static struct slice_mask slice_mask_for_free(struct mm_struct *mm) for (i = 0; i < SLICE_NUM_HIGH; i++) if (!slice_high_has_vma(mm, i)) - ret.high_slices |= 1u << i; + ret.high_slices |= 1ul << i; return ret; } static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) { + unsigned char *hpsizes; + int index, mask_index; struct slice_mask ret = { 0, 0 }; unsigned long i; - u64 psizes; + u64 lpsizes; - psizes = mm->context.low_slices_psize; + lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) - if (((psizes >> (i * 4)) & 0xf) == psize) + if (((lpsizes >> (i * 4)) & 0xf) == psize) ret.low_slices |= 1u << i; - psizes = mm->context.high_slices_psize; - for (i = 0; i < SLICE_NUM_HIGH; i++) - if (((psizes >> (i * 4)) & 0xf) == psize) - ret.high_slices |= 1u << i; + hpsizes = mm->context.high_slices_psize; + for (i = 0; i < SLICE_NUM_HIGH; i++) { + mask_index = i & 0x1; + index = i >> 1; + if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize) + ret.high_slices |= 1ul << i; + } return ret; } @@ -183,8 +188,10 @@ static void slice_flush_segments(void *parm) static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) { + int index, mask_index; /* Write the new slice psize bits */ - u64 lpsizes, hpsizes; + unsigned char *hpsizes; + u64 lpsizes; unsigned long i, flags; slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); @@ -201,14 +208,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz lpsizes = (lpsizes & ~(0xful << (i * 4))) | (((unsigned long)psize) << (i * 4)); - hpsizes = mm->context.high_slices_psize; - for (i = 0; i < SLICE_NUM_HIGH; i++) - if (mask.high_slices & (1u << i)) - hpsizes = (hpsizes & ~(0xful << (i * 4))) | - (((unsigned long)psize) << (i * 4)); - + /* Assign the value back */ mm->context.low_slices_psize = lpsizes; - mm->context.high_slices_psize = hpsizes; + + hpsizes = mm->context.high_slices_psize; + for (i = 0; i < SLICE_NUM_HIGH; i++) { + mask_index = i & 0x1; + index = i >> 1; + if (mask.high_slices & (1ul << i)) + hpsizes[index] = (hpsizes[index] & + ~(0xf << (mask_index * 4))) | + (((unsigned long)psize) << (mask_index * 4)); + } slice_dbg(" lsps=%lx, hsps=%lx\n", mm->context.low_slices_psize, @@ -587,18 +598,19 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) { - u64 psizes; - int index; + unsigned char *hpsizes; + int index, mask_index; if (addr < SLICE_LOW_TOP) { - psizes = mm->context.low_slices_psize; + u64 lpsizes; + lpsizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); - } else { - psizes = mm->context.high_slices_psize; - index = GET_HIGH_SLICE_INDEX(addr); + return (lpsizes >> (index * 4)) & 0xf; } - - return (psizes >> (index * 4)) & 0xf; + hpsizes = mm->context.high_slices_psize; + index = GET_HIGH_SLICE_INDEX(addr); + mask_index = index & 0x1; + return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf; } EXPORT_SYMBOL_GPL(get_slice_psize); @@ -618,7 +630,9 @@ EXPORT_SYMBOL_GPL(get_slice_psize); */ void 
slice_set_user_psize(struct mm_struct *mm, unsigned int psize) { - unsigned long flags, lpsizes, hpsizes; + int index, mask_index; + unsigned char *hpsizes; + unsigned long flags, lpsizes; unsigned int old_psize; int i; @@ -639,15 +653,21 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) if (((lpsizes >> (i * 4)) & 0xf) == old_psize) lpsizes = (lpsizes & ~(0xful << (i * 4))) | (((unsigned long)psize) << (i * 4)); + /* Assign the value back */ + mm->context.low_slices_psize = lpsizes; hpsizes = mm->context.high_slices_psize; - for (i = 0; i < SLICE_NUM_HIGH; i++) - if (((hpsizes >> (i * 4)) & 0xf) == old_psize) - hpsizes = (hpsizes & ~(0xful << (i * 4))) | - (((unsigned long)psize) << (i * 4)); + for (i = 0; i < SLICE_NUM_HIGH; i++) { + mask_index = i & 0x1; + index = i >> 1; + if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize) + hpsizes[index] = (hpsizes[index] & + ~(0xf << (mask_index * 4))) | + (((unsigned long)psize) << (mask_index * 4)); + } + + - mm->context.low_slices_psize = lpsizes; - mm->context.high_slices_psize = hpsizes; slice_dbg(" lsps=%lx, hsps=%lx\n", mm->context.low_slices_psize, @@ -660,18 +680,27 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) void slice_set_psize(struct mm_struct *mm, unsigned long address, unsigned int psize) { + unsigned char *hpsizes; unsigned long i, flags; - u64 *p; + u64 *lpsizes; spin_lock_irqsave(&slice_convert_lock, flags); if (address < SLICE_LOW_TOP) { i = GET_LOW_SLICE_INDEX(address); - p = &mm->context.low_slices_psize; + lpsizes = &mm->context.low_slices_psize; + *lpsizes = (*lpsizes & ~(0xful << (i * 4))) | + ((unsigned long) psize << (i * 4)); } else { + int index, mask_index; i = GET_HIGH_SLICE_INDEX(address); - p = &mm->context.high_slices_psize; + hpsizes = mm->context.high_slices_psize; + mask_index = i & 0x1; + index = i >> 1; + hpsizes[index] = (hpsizes[index] & + ~(0xf << (mask_index * 4))) | + (((unsigned long)psize) << (mask_index * 4)); } - *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4)); + spin_unlock_irqrestore(&slice_convert_lock, flags); #ifdef CONFIG_SPU_BASE -- cgit v1.2.3 From ac8dc2823a30a2d166fed6e919ab2e576f8fca84 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:53 +0000 Subject: powerpc/mm: Use the required number of VSID bits in slbmte ASM_VSID_SCRAMBLE can leave non-zero bits in the high 28 bits of the result for 256MB segment (40 bits for 1T segment). 
Properly mask them before using the values in slbmte Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/slb_low.S | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index e132dc6ed1a9..3b75f19aaa22 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user) */ slb_finish_load: ASM_VSID_SCRAMBLE(r10,r9,256M) - rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ + /* + * bits above VSID_BITS_256M need to be ignored from r10 + * also combine VSID and flags + */ + rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M)) /* r3 = EA, r11 = VSID data */ /* @@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size) slb_finish_load_1T: srdi r10,r10,40-28 /* get 1T ESID */ ASM_VSID_SCRAMBLE(r10,r9,1T) - rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */ + /* + * bits above VSID_BITS_1T need to be ignored from r10 + * also combine VSID and flags + */ + rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T)) li r10,MMU_SEGSIZE_1T rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */ -- cgit v1.2.3 From 735cafc32b661f08a266a8d754e6cfbd82c11704 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:54 +0000 Subject: powerpc/mm: Use 32bit array for slb cache With larger vsid we need to track more bits of ESID in slb cache for slb invalidate. Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/mm/slb_low.S | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 7796519fd238..e9e7a6999bb8 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -100,7 +100,7 @@ struct paca_struct { /* SLB related definitions */ u16 vmalloc_sllp; u16 slb_cache_ptr; - u16 slb_cache[SLB_CACHE_ENTRIES]; + u32 slb_cache[SLB_CACHE_ENTRIES]; #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC_BOOK3E diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 3b75f19aaa22..f6a262555ef2 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -270,10 +270,10 @@ _GLOBAL(slb_compare_rr_to_size) bge 1f /* still room in the slb cache */ - sldi r11,r3,1 /* r11 = offset * sizeof(u16) */ - rldicl r10,r10,36,28 /* get low 16 bits of the ESID */ - add r11,r11,r13 /* r11 = (u16 *)paca + offset */ - sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ + sldi r11,r3,2 /* r11 = offset * sizeof(u32) */ + srdi r10,r10,28 /* get the 36 bits of the ESID */ + add r11,r11,r13 /* r11 = (u32 *)paca + offset */ + stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ addi r3,r3,1 /* offset++ */ b 2f 1: /* offset >= SLB_CACHE_ENTRIES */ -- cgit v1.2.3 From 048ee0993ec8360abb0b51bdf8f8721e9ed62ec4 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:55 +0000 Subject: powerpc/mm: Add 64TB support Increase max addressable range to 64TB. This is not tested on real hardware yet. 
Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu-hash64.h | 42 ++++++++++++++++++++++------ arch/powerpc/include/asm/pgtable-ppc64-4k.h | 2 +- arch/powerpc/include/asm/pgtable-ppc64-64k.h | 2 +- arch/powerpc/include/asm/processor.h | 4 +-- arch/powerpc/include/asm/sparsemem.h | 4 +-- arch/powerpc/kernel/exceptions-64s.S | 4 ++- arch/powerpc/mm/slb_low.S | 12 ++++++++ 7 files changed, 54 insertions(+), 16 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 7cbd541f915f..23730ee65aa0 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -370,17 +370,21 @@ extern void slb_set_size(u16 size); * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. */ -#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */ -#define VSID_BITS_256M 36 +/* + * This should be computed such that protovsid * vsid_multiplier + * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus + */ +#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ +#define VSID_BITS_256M 38 #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_1T 24 +#define VSID_BITS_1T 26 #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) #define CONTEXT_BITS 19 -#define USER_ESID_BITS 16 -#define USER_ESID_BITS_1T 4 +#define USER_ESID_BITS 18 +#define USER_ESID_BITS_1T 6 -/* This is only valid for addresses >= PAGE_OFFSET */ +/* + * This is only valid for addresses >= PAGE_OFFSET + * The proto-VSID space is divided into two classes + * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 + * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 + * + * With KERNEL_START at 0xc000000000000000, the proto-VSID for + * the kernel ends up as 0xc00000000 (36 bits). With 64TB + * support we need to have kernel proto-VSID in the + * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. + */ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) { - if (ssize == MMU_SEGSIZE_256M) - return vsid_scramble(ea >> SID_SHIFT, 256M); - return vsid_scramble(ea >> SID_SHIFT_1T, 1T); + unsigned long proto_vsid; + /* + * We need to make sure proto_vsid for the kernel is + * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) + */ + if (ssize == MMU_SEGSIZE_256M) { + proto_vsid = ea >> SID_SHIFT; + proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); + return vsid_scramble(proto_vsid, 256M); + } + proto_vsid = ea >> SID_SHIFT_1T; + proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); + return vsid_scramble(proto_vsid, 1T); } /* Returns the segment size indicator for a user address */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h index d6489a2c64c8..12798c9d4b4b 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h @@ -7,7 +7,7 @@ */ #define PTE_INDEX_SIZE 9 #define PMD_INDEX_SIZE 7 -#define PUD_INDEX_SIZE 7 +#define PUD_INDEX_SIZE 9 #define PGD_INDEX_SIZE 9 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h index 90533ddcd703..be4e2878fbc0 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h @@ -7,7 +7,7 @@ #define PTE_INDEX_SIZE 12 #define PMD_INDEX_SIZE 12 #define PUD_INDEX_SIZE 0 -#define PGD_INDEX_SIZE 4 +#define PGD_INDEX_SIZE 6 #ifndef __ASSEMBLY__ #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 83efc6e81543..9dc5cd1fde1a 100644 --- a/arch/powerpc/include/asm/processor.h +++ 
b/arch/powerpc/include/asm/processor.h @@ -97,8 +97,8 @@ extern struct task_struct *last_task_used_spe; #endif #ifdef CONFIG_PPC64 -/* 64-bit user address space is 44-bits (16TB user VM) */ -#define TASK_SIZE_USER64 (0x0000100000000000UL) +/* 64-bit user address space is 46-bits (64TB user VM) */ +#define TASK_SIZE_USER64 (0x0000400000000000UL) /* * 32-bit user address space is 4GB - 1 page diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h index 0c5fa3145615..f6fc0ee813d7 100644 --- a/arch/powerpc/include/asm/sparsemem.h +++ b/arch/powerpc/include/asm/sparsemem.h @@ -10,8 +10,8 @@ */ #define SECTION_SIZE_BITS 24 -#define MAX_PHYSADDR_BITS 44 -#define MAX_PHYSMEM_BITS 44 +#define MAX_PHYSADDR_BITS 46 +#define MAX_PHYSMEM_BITS 46 #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 5eb00569199f..10b658ad65e1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1083,7 +1083,9 @@ _GLOBAL(do_stab_bolted) rldimi r10,r11,7,52 /* r10 = first ste of the group */ /* Calculate VSID */ - /* This is a kernel address, so protovsid = ESID */ + /* This is a kernel address, so protovsid = ESID | 1 << 37 */ + li r9,0x1 + rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 ASM_VSID_SCRAMBLE(r11, r9, 256M) rldic r9,r11,12,16 /* r9 = vsid << 12 */ diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index f6a262555ef2..1a16ca227757 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode) */ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 + li r9,0x1 + /* + * for 1T we shift 12 bits more. slb_finish_load_1T will do + * the necessary adjustment + */ + rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) @@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) _GLOBAL(slb_miss_kernel_load_io) li r11,0 6: + li r9,0x1 + /* + * for 1T we shift 12 bits more. slb_finish_load_1T will do + * the necessary adjustment + */ + rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) -- cgit v1.2.3 From f033d659c3b931d8b2a16625155e20304e173c9f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Sep 2012 02:52:56 +0000 Subject: powerpc/mm: Update VSID allocation documentation This update the proto-VSID and VSID scramble related information to be more generic by using names instead of current values. Reviewed-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu-hash64.h | 40 +++++++++++++++-------------------- arch/powerpc/mm/mmu_context_hash64.c | 8 ++++--- 2 files changed, 22 insertions(+), 26 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 23730ee65aa0..3e887467a6d1 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -324,51 +324,45 @@ extern void slb_set_size(u16 size); #endif /* __ASSEMBLY__ */ /* - * VSID allocation + * VSID allocation (256MB segment) * - * We first generate a 36-bit "proto-VSID". For kernel addresses this - * is equal to the ESID, for user addresses it is: - * (context << 15) | (esid & 0x7fff) + * We first generate a 38-bit "proto-VSID". 
For kernel addresses this + * is equal to the ESID | 1 << 37, for user addresses it is: + * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)) * - * The two forms are distinguishable because the top bit is 0 for user - * addresses, whereas the top two bits are 1 for kernel addresses. - * Proto-VSIDs with the top two bits equal to 0b10 are reserved for - * now. + * This splits the proto-VSID into the ranges below + * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range + * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range + * + * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 + * That is, we assign half of the space to user processes and half + * to the kernel. * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: * * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS - * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7 - * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF * - * This scramble is only well defined for proto-VSIDs below - * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are - * reserved. VSID_MULTIPLIER is prime, so in particular it is + * VSID_MULTIPLIER is prime, so in particular it is * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * Because the modulus is 2^n-1 we can compute it efficiently without * a divide or extra multiply (see below). * * This scheme has several advantages over older methods: * - * - We have VSIDs allocated for every kernel address + * - We have VSIDs allocated for every kernel address * (i.e. everything above 0xC000000000000000), except the very top * segment, which simplifies several things. * - * - We allow for 16 significant bits of ESID and 19 bits of - * context for user addresses. i.e. 16T (44 bits) of address space for - * up to half a million contexts. + * - We allow for USER_ESID_BITS significant bits of ESID and + * CONTEXT_BITS bits of context for user addresses. + * i.e. 64T (46 bits) of address space for up to half a million contexts. * - * - The scramble function gives robust scattering in the hash + * - The scramble function gives robust scattering in the hash * table (at least based on some initial results). The previous * method was more susceptible to pathological cases giving excessive * hash collisions. */ -/* - * WARNING - If you change these you must make sure the asm - * implementations in slb_allocate (slb_low.S), do_stab_bolted - * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. - */ /* * This should be computed such that protovsid * vsid_multiplier diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index daa076c9025c..40bc5b0ace54 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -30,9 +30,11 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); /* - * The proto-VSID space has 2^35 - 1 segments available for user mappings. - * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, - * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). + * 256MB segment + * The proto-VSID space has 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 segments + * available for user mappings. Each segment contains 2^28 bytes. Each + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts + * (19 == 37 + 28 - 46).
 */
 #define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
-- cgit v1.2.3

From 78f1dbde9fd020419313c2a0c3b602ea2427118f Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Mon, 10 Sep 2012 02:52:57 +0000
Subject: powerpc/mm: Make some of the PGTABLE_RANGE dependency explicit

Slice array size and slice mask size depend on PGTABLE_RANGE.

Reviewed-by: Paul Mackerras
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/mmu-hash64.h    | 15 ++++++++++-----
 arch/powerpc/include/asm/mmu.h           |  9 +++++++++
 arch/powerpc/include/asm/page_64.h       | 12 ++++++++----
 arch/powerpc/include/asm/pgtable-ppc64.h | 17 ++---------------
 arch/powerpc/include/asm/pgtable.h       | 10 ++--------
 arch/powerpc/include/asm/tlbflush.h      |  3 ---
 arch/powerpc/mm/pgtable_64.c             | 12 +++++++++++-
 arch/powerpc/mm/slice.c                  |  5 +++++
 8 files changed, 47 insertions(+), 36 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 3e887467a6d1..9673f73eb8db 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -15,6 +15,13 @@
 #include
 #include
+/*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slice-related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include
 /*
  * Segment table
  */
@@ -414,6 +421,8 @@ extern void slb_set_size(u16 size);
 	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
 	add	rt,rt,rx
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
 #ifndef __ASSEMBLY__
@@ -458,11 +467,7 @@ typedef struct {
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;	/* SLB page size encodings */
-	/*
-	 * Right now we support 64TB and 4 bits for each
-	 * 1TB slice we need 32 bytes for 64TB.
-	 */
-	unsigned char high_slices_psize[32]; /* 4 bits per slice for now */
+	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
 #else
 	u16 sllp;	/* SLB page size encoding */
 #endif

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f3..5e38eedea218 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 extern u64 ppc64_rma_size;
 #endif /* CONFIG_PPC64 */
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
 #endif /* !__ASSEMBLY__ */
 /* The kernel uses the constants below to index in the page sizes array.

diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 6c9bef4cb6a0..cd915d6b093d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,14 +78,18 @@ extern u64 ppc64_pft_size;
 #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
+/*
+ * 1 bit per slice and we have one slice per 1TB
+ * Right now we support only 64TB.
+ * If we change this we will have to change the type
+ * of high_slices
+ */
+#define SLICE_MASK_SIZE 8
 #ifndef __ASSEMBLY__
 struct slice_mask {
 	u16 low_slices;
-	/*
-	 * This should be derived out of PGTABLE_RANGE. For the current
-	 * max 64TB, u64 should be ok.
-	 */
 	u64 high_slices;
 };

diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8af1cf27fd42..0182c203e411 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-/* Some sanity checking */
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-#endif
-
 /*
  * Define the address range of the kernel non-linear virtual area
  */
@@ -117,9 +106,6 @@
 #ifndef __ASSEMBLY__
-#include
-#include
-
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7ae..a9cbd3ba5c33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
 struct mm_struct;
-#ifdef CONFIG_DEBUG_VM
-extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
-#else /* CONFIG_DEBUG_VM */
-static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
-{
-}
-#endif /* !CONFIG_DEBUG_VM */
-
 #endif /* !__ASSEMBLY__ */
 #if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 #ifndef __ASSEMBLY__
+#include
+
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }

diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index fc02d1dee955..61a59271665b 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
-			    pte_t *ptep, unsigned long pte, int huge);
-
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 297d49547ea8..e212a271c7a4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -55,8 +55,18 @@
 #include "mmu_decl.h"
-unsigned long ioremap_bot = IOREMAP_BASE;
+/* Some sanity checking */
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
+#endif
+unsigned long ioremap_bot = IOREMAP_BASE;
 #ifdef CONFIG_PPC_MMU_NOHASH
 static void *early_alloc_pgtable(unsigned long size)

diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index b4e996a398bd..5829d2a950d4 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
 #include
 #include
+/* some sanity checks */
+#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
+#error PGTABLE_RANGE exceeds slice_mask high_slices size
+#endif
+
 static DEFINE_SPINLOCK(slice_convert_lock);
-- cgit v1.2.3

From 8e166991c0c3631b8af5a26990df77fa2a1d09e6 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Thu, 20 Sep 2012 22:08:28 +0000
Subject: powerpc: Remove tlb batching hack for nighthawk

In hpte_init_native() we call tlb_batching_enabled() to decide if we
should set up ppc_md.flush_hash_range.

tlb_batching_enabled() checks the _unflattened_ device tree, to see if
we are running on a nighthawk.

Since commit a223535 ("dont allow pSeries_probe to succeed without
initialising MMU", Dec 2006), hpte_init_native() has been called from
pSeries_probe() - at which point we have not yet unflattened the device
tree.

This means tlb_batching_enabled() will always return true, so the hack
has effectively been disabled since Dec 2006. Ergo, I think we can drop
it.

Signed-off-by: Michael Ellerman
Acked-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/hash_native_64.c | 26 +-------------------------
 1 file changed, 1 insertion(+), 25 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index a4a1c728f269..ffc1e00f7a22 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -569,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
 	local_irq_restore(flags);
 }
-#ifdef CONFIG_PPC_PSERIES
-/* Disable TLB batching on nighthawk */
-static inline int tlb_batching_enabled(void)
-{
-	struct device_node *root = of_find_node_by_path("/");
-	int enabled = 1;
-
-	if (root) {
-		const char *model = of_get_property(root, "model", NULL);
-		if (model && !strcmp(model, "IBM,9076-N81"))
-			enabled = 0;
-		of_node_put(root);
-	}
-
-	return enabled;
-}
-#else
-static inline int tlb_batching_enabled(void)
-{
-	return 1;
-}
-#endif
-
 void __init hpte_init_native(void)
 {
 	ppc_md.hpte_invalidate	= native_hpte_invalidate;
@@ -600,6 +577,5 @@ void __init hpte_init_native(void)
 	ppc_md.hpte_insert	= native_hpte_insert;
 	ppc_md.hpte_remove	= native_hpte_remove;
 	ppc_md.hpte_clear_all	= native_hpte_clear;
-	if (tlb_batching_enabled())
-		ppc_md.flush_hash_range = native_flush_hash_range;
+	ppc_md.flush_hash_range = native_flush_hash_range;
 }
-- cgit v1.2.3
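
To make the VSID arithmetic in the patches above concrete, here is a
minimal standalone C sketch of the documented scheme: proto-VSID
construction for user and kernel addresses, and the divide-free scramble
modulo 2^n - 1. It assumes the 64TB/256MB-segment constants from this
series (CONTEXT_BITS = 19, USER_ESID_BITS = 18, SID_SHIFT = 28, hence
VSID_BITS = 38). The multiplier is the prime quoted in the comment
removed by the documentation patch and is used here purely for
illustration; the helper names (vsid_scramble, user_proto_vsid,
kernel_proto_vsid) are ours, not kernel functions.

    /* vsid_sketch.c - build with: gcc -std=c11 vsid_sketch.c (64-bit GCC/Clang) */
    #include <stdint.h>
    #include <stdio.h>

    #define CONTEXT_BITS    19
    #define USER_ESID_BITS  18
    #define VSID_BITS       (CONTEXT_BITS + USER_ESID_BITS + 1)   /* 38 */
    #define VSID_MODULUS    ((1ULL << VSID_BITS) - 1)
    #define SID_SHIFT       28             /* 256MB segments */
    #define VSID_MULTIPLIER 268435399ULL   /* prime, from the removed comment */

    /* (context << USER_ESID_BITS) | low ESID bits; bit 37 stays clear,
     * so this lands in the user half of the proto-VSID space. */
    static uint64_t user_proto_vsid(uint64_t context, uint64_t ea)
    {
            uint64_t esid = ea >> SID_SHIFT;
            return (context << USER_ESID_BITS) |
                   (esid & ((1ULL << USER_ESID_BITS) - 1));
    }

    /* ESID | 1 << 37, matching the rldimi sequences added above. */
    static uint64_t kernel_proto_vsid(uint64_t ea)
    {
            uint64_t esid = (ea >> SID_SHIFT) &
                            ((1ULL << (CONTEXT_BITS + USER_ESID_BITS)) - 1);
            return esid | (1ULL << (CONTEXT_BITS + USER_ESID_BITS));
    }

    /* VSID = (proto-VSID * MULTIPLIER) % (2^n - 1) without a divide:
     * since 2^n == 1 (mod 2^n - 1), the high bits of the product can be
     * folded back into the low n bits. A 128-bit product (GCC/Clang
     * extension) keeps the multiply exact; the kernel instead requires a
     * multiplier small enough that the product fits in 64 bits. */
    static uint64_t vsid_scramble(uint64_t protovsid)
    {
            unsigned __int128 prod = (unsigned __int128)protovsid * VSID_MULTIPLIER;
            uint64_t x = (uint64_t)(prod >> VSID_BITS) +
                         ((uint64_t)prod & VSID_MODULUS);

            if (x >= VSID_MODULUS)  /* after one fold, x < 2*VSID_MODULUS */
                    x -= VSID_MODULUS;
            return x;
    }

    int main(void)
    {
            uint64_t kea = 0xc000000001000000ULL;   /* kernel linear address */
            uint64_t uea = 0x0000200000000000ULL;   /* user address in the 64TB range */

            printf("kernel VSID 0x%llx\n",
                   (unsigned long long)vsid_scramble(kernel_proto_vsid(kea)));
            printf("user   VSID 0x%llx\n",
                   (unsigned long long)vsid_scramble(user_proto_vsid(5, uea)));
            return 0;
    }

The single conditional subtract suffices because one fold leaves at most
2^38 + 2^28 < 2 * VSID_MODULUS; the kernel's ASM_VSID_SCRAMBLE performs
the same reduction with a shift/add sequence.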
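
The slice-related shift constants in the PGTABLE_RANGE patch rely on the
same kind of bit arithmetic and are easy to misread. A short sketch,
assuming 1TB high slices (SLICE_HIGH_SHIFT = 40, i.e. SID_SHIFT_1T) and
the 64TB range from this series; high_slice_index() is an illustrative
stand-in for GET_HIGH_SLICE_INDEX, and the static_assert checks restate
the kernel's invariants rather than copying its #error strings.

    /* slice_sizes.c - spell out the >> 41 and >> 43 constants (C11) */
    #include <assert.h>
    #include <stdint.h>

    #define PGTABLE_RANGE    (1ULL << 46)           /* 64TB */
    #define SLICE_HIGH_SHIFT 40                     /* one slice per 1TB */
    #define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)  /* 4 psize bits per slice */
    #define SLICE_MASK_SIZE  8                      /* bytes in a u64 */

    /* 64TB / 1TB = 64 high slices */
    static_assert((PGTABLE_RANGE >> SLICE_HIGH_SHIFT) == 64,
                  "64 high slices at 1TB each");
    /* 4 bits per slice = half a byte, so bytes = range >> (40 + 1) */
    static_assert(SLICE_ARRAY_SIZE == 32,
                  "64 slices * 4 bits = 32 bytes of psize state");
    /* 1 bit per slice, so bytes = range >> (40 + 3); must fit a u64 */
    static_assert((PGTABLE_RANGE >> 43) <= SLICE_MASK_SIZE,
                  "64 slices * 1 bit fits the u64 high_slices mask");

    /* High slice index of an address, analogous to GET_HIGH_SLICE_INDEX */
    static inline unsigned int high_slice_index(uint64_t addr)
    {
            return (unsigned int)(addr >> SLICE_HIGH_SHIFT);
    }

Note the direction of the kernel's own guard: slice.c #errors when
(PGTABLE_RANGE >> 43) is strictly greater than SLICE_MASK_SIZE, which is
exactly the negation of the last assertion above.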