author | Michael Ellerman <mpe@ellerman.id.au> | 2014-12-02 06:19:20 +0300
committer | Michael Ellerman <mpe@ellerman.id.au> | 2014-12-02 06:19:20 +0300
commit | b5be75d00833a3f0ff76b1d7473119be33367faa (patch)
tree | 88106f598970b086f06a0d7dd123b6d00adf9f4a /arch/powerpc
parent | e39f223fc93580c86ccf6b3422033e349f57f0dd (diff)
parent | d557b09800dab5dd6804e5b79324069abcf0be11 (diff)
download | linux-b5be75d00833a3f0ff76b1d7473119be33367faa.tar.xz
Merge remote-tracking branch 'benh/next' into next
Merge updates collected & acked by Ben. A few EEH patches from Gavin,
some mm updates from Aneesh and a few odds and ends.
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/include/asm/eeh.h | 2
-rw-r--r-- | arch/powerpc/include/asm/machdep.h | 2
-rw-r--r-- | arch/powerpc/include/asm/opal.h | 104
-rw-r--r-- | arch/powerpc/include/asm/paca.h | 7
-rw-r--r-- | arch/powerpc/include/asm/tlbflush.h | 4
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c | 7
-rw-r--r-- | arch/powerpc/kernel/eeh.c | 41
-rw-r--r-- | arch/powerpc/kernel/eeh_driver.c | 10
-rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S | 17
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c | 28
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 70
-rw-r--r-- | arch/powerpc/mm/hugepage-hash64.c | 54
-rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 7
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 69
-rw-r--r-- | arch/powerpc/oprofile/backtrace.c | 6
-rw-r--r-- | arch/powerpc/platforms/powernv/eeh-ioda.c | 15
-rw-r--r-- | arch/powerpc/platforms/powernv/opal.c | 1
-rw-r--r-- | arch/powerpc/platforms/pseries/lpar.c | 2
18 files changed, 162 insertions, 284 deletions
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index ca07f9c27335..0652ebe117af 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -39,6 +39,7 @@ struct device_node;
 #define EEH_PROBE_MODE_DEV	0x04	/* From PCI device	*/
 #define EEH_PROBE_MODE_DEVTREE	0x08	/* From device tree	*/
 #define EEH_ENABLE_IO_FOR_LOG	0x10	/* Enable IO for log	*/
+#define EEH_EARLY_DUMP_LOG	0x20	/* Dump log immediately	*/
 
 /*
  * Delay for PE reset, all in ms
@@ -72,6 +73,7 @@ struct device_node;
 #define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE		*/
 #define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE	*/
 #define EEH_PE_CFG_BLOCKED	(1 << 2)	/* Block config access	*/
+#define EEH_PE_RESET		(1 << 3)	/* PE reset in progress	*/
 
 #define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
 #define EEH_PE_CFG_RESTRICTED	(1 << 9)	/* Block config on error */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 15c9150a58cc..e5c0919acca4 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -60,7 +60,7 @@ struct machdep_calls {
 	void		(*hugepage_invalidate)(unsigned long vsid,
 					       unsigned long addr,
 					       unsigned char *hpte_slot_array,
-					       int psize, int ssize);
+					       int psize, int ssize, int local);
 	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 60250e2d1f0d..5cd8d2fddba9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -288,62 +288,6 @@ enum OpalMessageType {
 	OPAL_MSG_TYPE_MAX,
 };
 
-/* Machine check related definitions */
-enum OpalMCE_Version {
-	OpalMCE_V1 = 1,
-};
-
-enum OpalMCE_Severity {
-	OpalMCE_SEV_NO_ERROR = 0,
-	OpalMCE_SEV_WARNING = 1,
-	OpalMCE_SEV_ERROR_SYNC = 2,
-	OpalMCE_SEV_FATAL = 3,
-};
-
-enum OpalMCE_Disposition {
-	OpalMCE_DISPOSITION_RECOVERED = 0,
-	OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalMCE_Initiator {
-	OpalMCE_INITIATOR_UNKNOWN = 0,
-	OpalMCE_INITIATOR_CPU = 1,
-};
-
-enum OpalMCE_ErrorType {
-	OpalMCE_ERROR_TYPE_UNKNOWN = 0,
-	OpalMCE_ERROR_TYPE_UE = 1,
-	OpalMCE_ERROR_TYPE_SLB = 2,
-	OpalMCE_ERROR_TYPE_ERAT = 3,
-	OpalMCE_ERROR_TYPE_TLB = 4,
-};
-
-enum OpalMCE_UeErrorType {
-	OpalMCE_UE_ERROR_INDETERMINATE = 0,
-	OpalMCE_UE_ERROR_IFETCH = 1,
-	OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
-	OpalMCE_UE_ERROR_LOAD_STORE = 3,
-	OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
-};
-
-enum OpalMCE_SlbErrorType {
-	OpalMCE_SLB_ERROR_INDETERMINATE = 0,
-	OpalMCE_SLB_ERROR_PARITY = 1,
-	OpalMCE_SLB_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_EratErrorType {
-	OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
-	OpalMCE_ERAT_ERROR_PARITY = 1,
-	OpalMCE_ERAT_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_TlbErrorType {
-	OpalMCE_TLB_ERROR_INDETERMINATE = 0,
-	OpalMCE_TLB_ERROR_PARITY = 1,
-	OpalMCE_TLB_ERROR_MULTIHIT = 2,
-};
-
 enum OpalThreadStatus {
 	OPAL_THREAD_INACTIVE = 0x0,
 	OPAL_THREAD_STARTED = 0x1,
@@ -467,54 +411,6 @@ struct opal_ipmi_msg {
 	uint8_t data[];
 };
 
-struct opal_machine_check_event {
-	enum OpalMCE_Version	version:8;	/* 0x00 */
-	uint8_t			in_use;		/* 0x01 */
-	enum OpalMCE_Severity	severity:8;	/* 0x02 */
-	enum OpalMCE_Initiator	initiator:8;	/* 0x03 */
-	enum OpalMCE_ErrorType	error_type:8;	/* 0x04 */
-	enum OpalMCE_Disposition disposition:8;	/* 0x05 */
-	uint8_t			reserved_1[2];	/* 0x06 */
-	uint64_t		gpr3;		/* 0x08 */
-	uint64_t		srr0;		/* 0x10 */
-	uint64_t		srr1;		/* 0x18 */
-	union {					/* 0x20 */
-		struct {
-			enum OpalMCE_UeErrorType ue_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		physical_address_provided;
-			uint8_t		reserved_1[5];
-			uint64_t	effective_address;
-			uint64_t	physical_address;
-			uint8_t		reserved_2[8];
-		} ue_error;
-
-		struct {
-			enum OpalMCE_SlbErrorType slb_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} slb_error;
-
-		struct {
-			enum OpalMCE_EratErrorType erat_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} erat_error;
-
-		struct {
-			enum OpalMCE_TlbErrorType tlb_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} tlb_error;
-	} u;
-};
-
 /* FSP memory errors handling */
 enum OpalMemErr_Version {
 	OpalMemErr_V1 = 1,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5139ea6910b..24a386cbb928 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
 #define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
 
 struct task_struct;
-struct opal_machine_check_event;
 
 /*
  * Defines the layout of the paca.
@@ -153,12 +152,6 @@ struct paca_struct {
 	u64 tm_scratch;			/* TM scratch area for reclaim */
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-	/* Pointer to OPAL machine check event structure set by the
-	 * early exception handler for use by high level C handler
-	 */
-	struct opal_machine_check_event *opal_mc_evt;
-#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Exclusive emergency stack pointer for machine check exception. */
 	void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index cd7c2719d3ef..4d3ecd8d8929 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -127,7 +127,9 @@ static inline void arch_leave_lazy_mmu_mode(void)
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
 			    int ssize, int local);
 extern void flush_hash_range(unsigned long number, int local);
-
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+				pmd_t *pmdp, unsigned int psize, int ssize,
+				int local);
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9d7dede2847c..c161ef3f28a1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -726,12 +726,5 @@ int main(void)
 				arch.timing_last_enter.tv32.tbl));
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-	DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
-	DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
-	DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
-	DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
-#endif
-
 	return 0;
 }
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 2248a1999c64..e1b6d8e17289 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str)
 {
 	if (!strcmp(str, "off"))
 		eeh_add_flag(EEH_FORCE_DISABLED);
+	else if (!strcmp(str, "early_log"))
+		eeh_add_flag(EEH_EARLY_DUMP_LOG);
 
 	return 1;
 }
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 int eeh_reset_pe(struct eeh_pe *pe)
 {
 	int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
-	int i, rc;
+	int i, state, ret;
+
+	/* Mark as reset and block config space */
+	eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
 
 	/* Take three shots at resetting the bus */
-	for (i=0; i<3; i++) {
+	for (i = 0; i < 3; i++) {
 		eeh_reset_pe_once(pe);
 
 		/*
 		 * EEH_PE_ISOLATED is expected to be removed after
 		 * BAR restore.
 		 */
-		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
-		if ((rc & flags) == flags)
-			return 0;
+		state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+		if ((state & flags) == flags) {
+			ret = 0;
+			goto out;
+		}
 
-		if (rc < 0) {
-			pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+		if (state < 0) {
+			pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
 				__func__, pe->phb->global_number, pe->addr);
-			return -1;
+			ret = -ENOTRECOVERABLE;
+			goto out;
 		}
-		pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
-			i+1, pe->phb->global_number, pe->addr, rc);
+
+		/* We might run out of credits */
+		ret = -EIO;
+		pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
+			__func__, state, pe->phb->global_number, pe->addr, (i + 1));
 	}
 
-	return -1;
+out:
+	eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+	return ret;
 }
 
 /**
@@ -920,11 +933,8 @@ int eeh_init(void)
 		pr_warn("%s: Platform EEH operation not found\n",
 			__func__);
 		return -EEXIST;
-	} else if ((ret = eeh_ops->init())) {
-		pr_warn("%s: Failed to call platform init function (%d)\n",
-			__func__, ret);
+	} else if ((ret = eeh_ops->init()))
 		return ret;
-	}
 
 	/* Initialize EEH event */
 	ret = eeh_event_init();
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
 static struct pci_device_id eeh_reset_ids[] = {
 	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE */
 	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
+	{ PCI_DEVICE(0x14e4, 0x1657) },	/* Broadcom BCM5719 */
 	{ 0 }
 };
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6535936bdf27..b17e793ba67e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
 	/* Issue reset */
-	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	ret = eeh_reset_pe(pe);
 	if (ret) {
-		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
+		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		return ret;
 	}
-	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Unfreeze the PE */
 	ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	 * config accesses. So we prefer to block them. However, controlled
 	 * PCI config accesses initiated from EEH itself are allowed.
 	 */
-	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	rc = eeh_reset_pe(pe);
-	if (rc) {
-		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+	if (rc)
 		return rc;
-	}
 
 	pci_lock_rescan_remove();
 
 	/* Restore PE */
 	eeh_ops->configure_bridge(pe);
 	eeh_pe_restore_bars(pe);
-	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Clear frozen state */
 	rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a1d45c161e24..ad62f4d6ce31 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1312,23 +1312,6 @@ hmi_exception_after_realmode:
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	hmi_exception_hv
 
-#ifdef CONFIG_PPC_POWERNV
-_GLOBAL(opal_mc_secondary_handler)
-	HMT_MEDIUM_PPR_DISCARD
-	SET_SCRATCH0(r13)
-	GET_PACA(r13)
-	clrldi	r3,r3,2
-	tovirt(r3,r3)
-	std	r3,PACA_OPAL_MC_EVT(r13)
-	ld	r13,OPAL_MC_SRR0(r3)
-	mtspr	SPRN_SRR0,r13
-	ld	r13,OPAL_MC_SRR1(r3)
-	mtspr	SPRN_SRR1,r13
-	ld	r3,OPAL_MC_GPR3(r3)
-	GET_SCRATCH0(r13)
-	b	machine_check_pSeries
-#endif /* CONFIG_PPC_POWERNV */
-
 #define MACHINE_CHECK_HANDLER_WINDUP			\
 	/* Clear MSR_RI before setting SRR0 and SRR1. */\
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d53288a08c37..13700911b522 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -294,8 +294,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
-	native_lock_hpte(hptep);
-
 	hpte_v = be64_to_cpu(hptep->v);
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -308,16 +306,24 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> miss\n");
 		ret = -1;
 	} else {
-		DBG_LOW(" -> hit\n");
-		/* Update the HPTE */
-		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+		native_lock_hpte(hptep);
+		/* recheck with locks held */
+		hpte_v = be64_to_cpu(hptep->v);
+		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
+			     !(hpte_v & HPTE_V_VALID))) {
+			ret = -1;
+		} else {
+			DBG_LOW(" -> hit\n");
+			/* Update the HPTE */
+			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+						~(HPTE_R_PP | HPTE_R_N)) |
+					       (newpp & (HPTE_R_PP | HPTE_R_N |
+							 HPTE_R_C)));
+		}
+		native_unlock_hpte(hptep);
 	}
-	native_unlock_hpte(hptep);
-
 	/* Ensure it is out of the tlb too. */
 	tlbie(vpn, bpsize, apsize, ssize, local);
-
 	return ret;
 }
 
@@ -419,7 +425,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 static void native_hugepage_invalidate(unsigned long vsid,
 				       unsigned long addr,
 				       unsigned char *hpte_slot_array,
-				       int psize, int ssize)
+				       int psize, int ssize, int local)
 {
 	int i;
 	struct hash_pte *hptep;
@@ -465,7 +471,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		 * instruction compares entry_VA in tlb with the VA specified
 		 * here
 		 */
-		tlbie(vpn, psize, actual_psize, ssize, 0);
+		tlbie(vpn, psize, actual_psize, ssize, local);
 	}
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f01027731e23..68211d398fdb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1315,6 +1315,76 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 #endif
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+			 pmd_t *pmdp, unsigned int psize, int ssize, int local)
+{
+	int i, max_hpte_count, valid;
+	unsigned long s_addr;
+	unsigned char *hpte_slot_array;
+	unsigned long hidx, shift, vpn, hash, slot;
+
+	s_addr = addr & HPAGE_PMD_MASK;
+	hpte_slot_array = get_hpte_slot_array(pmdp);
+	/*
+	 * IF we try to do a HUGE PTE update after a withdraw is done.
+	 * we will find the below NULL. This happens when we do
+	 * split_huge_page_pmd
+	 */
+	if (!hpte_slot_array)
+		return;
+
+	if (ppc_md.hugepage_invalidate) {
+		ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+					   psize, ssize, local);
+		goto tm_abort;
+	}
+	/*
+	 * No bluk hpte removal support, invalidate each entry
+	 */
+	shift = mmu_psize_defs[psize].shift;
+	max_hpte_count = HPAGE_PMD_SIZE >> shift;
+	for (i = 0; i < max_hpte_count; i++) {
+		/*
+		 * 8 bits per each hpte entries
+		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+		 */
+		valid = hpte_valid(hpte_slot_array, i);
+		if (!valid)
+			continue;
+		hidx = hpte_hash_index(hpte_slot_array, i);
+
+		/* get the vpn */
+		addr = s_addr + (i * (1ul << shift));
+		vpn = hpt_vpn(addr, vsid, ssize);
+		hash = hpt_hash(vpn, shift, ssize);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+		ppc_md.hpte_invalidate(slot, vpn, psize,
+				       MMU_PAGE_16M, ssize, local);
+	}
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Transactions are not aborted by tlbiel, only tlbie.
+	 * Without, syncing a page back to a block device w/ PIO could pick up
+	 * transactional data (bad!) so we force an abort here. Before the
+	 * sync the page will be made read-only, which will flush_hash_page.
+	 * BIG ISSUE here: if the kernel uses a page from userspace without
+	 * unmapping it first, it may see the speculated version.
+	 */
+	if (local && cpu_has_feature(CPU_FTR_TM) &&
+	    current->thread.regs &&
+	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
+		tm_enable();
+		tm_abort(TM_CAUSE_TLBI);
+	}
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range)
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 5f5e6328c21c..3a648cd363ae 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,57 +18,6 @@
 #include <linux/mm.h>
 #include <asm/machdep.h>
 
-static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
-				pmd_t *pmdp, unsigned int psize, int ssize)
-{
-	int i, max_hpte_count, valid;
-	unsigned long s_addr;
-	unsigned char *hpte_slot_array;
-	unsigned long hidx, shift, vpn, hash, slot;
-
-	s_addr = addr & HPAGE_PMD_MASK;
-	hpte_slot_array = get_hpte_slot_array(pmdp);
-	/*
-	 * IF we try to do a HUGE PTE update after a withdraw is done.
-	 * we will find the below NULL. This happens when we do
-	 * split_huge_page_pmd
-	 */
-	if (!hpte_slot_array)
-		return;
-
-	if (ppc_md.hugepage_invalidate)
-		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
-						  psize, ssize);
-	/*
-	 * No bluk hpte removal support, invalidate each entry
-	 */
-	shift = mmu_psize_defs[psize].shift;
-	max_hpte_count = HPAGE_PMD_SIZE >> shift;
-	for (i = 0; i < max_hpte_count; i++) {
-		/*
-		 * 8 bits per each hpte entries
-		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-		 */
-		valid = hpte_valid(hpte_slot_array, i);
-		if (!valid)
-			continue;
-		hidx = hpte_hash_index(hpte_slot_array, i);
-
-		/* get the vpn */
-		addr = s_addr + (i * (1ul << shift));
-		vpn = hpt_vpn(addr, vsid, ssize);
-		hash = hpt_hash(vpn, shift, ssize);
-		if (hidx & _PTEIDX_SECONDARY)
-			hash = ~hash;
-
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += hidx & _PTEIDX_GROUP_IX;
-		ppc_md.hpte_invalidate(slot, vpn, psize,
-				       MMU_PAGE_16M, ssize, 0);
-	}
-}
-
-
 int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
 		    unsigned int psize)
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * hash page table entries.
 		 */
 		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
-			invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
+					    ssize, local);
 	}
 
 	valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8c9b8115867c..868ab0fc5fbf 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -355,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
 		if (size != 0) {
 			if (sscanf(val, "%lu", &npages) <= 0)
 				npages = 0;
+			if (npages > MAX_NUMBER_GPAGES) {
+				pr_warn("MMU: %lu pages requested for page "
+					"size %llu KB, limiting to "
+					__stringify(MAX_NUMBER_GPAGES) "\n",
+					npages, size / 1024);
+				npages = MAX_NUMBER_GPAGES;
+			}
 			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
 			size = 0;
 		}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 87ff0c1908a9..eea9fa1f8ae7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -739,29 +739,14 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 			    pmd_t *pmdp, unsigned long old_pmd)
 {
-	int ssize, i;
-	unsigned long s_addr;
-	int max_hpte_count;
-	unsigned int psize, valid;
-	unsigned char *hpte_slot_array;
-	unsigned long hidx, vpn, vsid, hash, shift, slot;
-
-	/*
-	 * Flush all the hptes mapping this hugepage
-	 */
-	s_addr = addr & HPAGE_PMD_MASK;
-	hpte_slot_array = get_hpte_slot_array(pmdp);
-	/*
-	 * IF we try to do a HUGE PTE update after a withdraw is done.
-	 * we will find the below NULL. This happens when we do
-	 * split_huge_page_pmd
-	 */
-	if (!hpte_slot_array)
-		return;
+	int ssize, local = 0;
+	unsigned int psize;
+	unsigned long vsid;
+	const struct cpumask *tmp;
 
 	/* get the base page size,vsid and segment size */
 #ifdef CONFIG_DEBUG_VM
-	psize = get_slice_psize(mm, s_addr);
+	psize = get_slice_psize(mm, addr);
 	BUG_ON(psize == MMU_PAGE_16M);
 #endif
 	if (old_pmd & _PAGE_COMBO)
@@ -769,46 +754,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 	else
 		psize = MMU_PAGE_64K;
 
-	if (!is_kernel_addr(s_addr)) {
-		ssize = user_segment_size(s_addr);
-		vsid = get_vsid(mm->context.id, s_addr, ssize);
+	if (!is_kernel_addr(addr)) {
+		ssize = user_segment_size(addr);
+		vsid = get_vsid(mm->context.id, addr, ssize);
 		WARN_ON(vsid == 0);
 	} else {
-		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
 
-	if (ppc_md.hugepage_invalidate)
-		return ppc_md.hugepage_invalidate(vsid, s_addr,
-						  hpte_slot_array,
-						  psize, ssize);
-	/*
-	 * No bluk hpte removal support, invalidate each entry
-	 */
-	shift = mmu_psize_defs[psize].shift;
-	max_hpte_count = HPAGE_PMD_SIZE >> shift;
-	for (i = 0; i < max_hpte_count; i++) {
-		/*
-		 * 8 bits per each hpte entries
-		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-		 */
-		valid = hpte_valid(hpte_slot_array, i);
-		if (!valid)
-			continue;
-		hidx = hpte_hash_index(hpte_slot_array, i);
-
-		/* get the vpn */
-		addr = s_addr + (i * (1ul << shift));
-		vpn = hpt_vpn(addr, vsid, ssize);
-		hash = hpt_hash(vpn, shift, ssize);
-		if (hidx & _PTEIDX_SECONDARY)
-			hash = ~hash;
-
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += hidx & _PTEIDX_GROUP_IX;
-		ppc_md.hpte_invalidate(slot, vpn, psize,
-				       MMU_PAGE_16M, ssize, 0);
-	}
+	tmp = cpumask_of(smp_processor_id());
+	if (cpumask_equal(mm_cpumask(mm), tmp))
+		local = 1;
+
+	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 6adf55fa5d88..ecc66d5f02c9 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -10,7 +10,7 @@
 #include <linux/oprofile.h>
 #include <linux/sched.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/compat.h>
 #include <asm/oprofile_impl.h>
 
@@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 			first_frame = 0;
 		}
 	} else {
+		pagefault_disable();
 #ifdef CONFIG_PPC64
 		if (!is_32bit_task()) {
 			while (depth--) {
@@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 					break;
 				first_frame = 0;
 			}
-
+			pagefault_enable();
 			return;
 		}
 #endif
@@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 				break;
 			first_frame = 0;
 		}
+		pagefault_enable();
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index db3803e21483..2809c9895288 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -353,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
 	} else if (!(pe->state & EEH_PE_ISOLATED)) {
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -372,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 	 * moving forward, we have to return operational
 	 * state during PE reset.
 	 */
-	if (pe->state & EEH_PE_CFG_BLOCKED) {
+	if (pe->state & EEH_PE_RESET) {
 		result = (EEH_STATE_MMIO_ACTIVE |
 			  EEH_STATE_DMA_ACTIVE |
 			  EEH_STATE_MMIO_ENABLED |
@@ -451,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -730,7 +736,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
 			    char *drv_log, unsigned long len)
 {
-	pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 
 	return 0;
 }
@@ -1086,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 			    !((*pe)->state & EEH_PE_ISOLATED)) {
 				eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
 				ioda_eeh_phb_diag(*pe);
+
+				if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+					pnv_pci_dump_phb_diag_data((*pe)->phb,
+								   (*pe)->data);
 			}
 
 			/*
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index a1c37f9d60d2..cb0b6de79cd4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -50,7 +50,6 @@ static int mc_recoverable_range_len;
 
 struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
-extern u64 opal_mc_secondary_handler[];
 static unsigned int *opal_irqs;
 static unsigned int opal_irq_count;
 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index d214a012b026..832f221840f2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
 					     unsigned long addr,
 					     unsigned char *hpte_slot_array,
-					     int psize, int ssize)
+					     int psize, int ssize, int local)
 {
 	int i, index = 0;
 	unsigned long s_addr = addr;