Diffstat (limited to 'arch/powerpc/platforms')
21 files changed, 511 insertions, 110 deletions
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index a0589aac4163..69794d9389c2 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -24,6 +24,7 @@ #include <asm/machdep.h> #include <asm/iommu.h> #include <asm/ppc-pci.h> +#include <asm/isa-bridge.h> #include "maple.h" diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 479c25601612..4ee837e6391a 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -237,15 +237,21 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600, show_fastsleep_workaround_applyonce, store_fastsleep_workaround_applyonce); +/* + * The default stop state that will be used by ppc_md.power_save + * function on platforms that support stop instruction. + */ +u64 pnv_default_stop_val; +u64 pnv_default_stop_mask; /* * Used for ppc_md.power_save which needs a function with no parameters */ static void power9_idle(void) { - /* Requesting stop state 0 */ - power9_idle_stop(0); + power9_idle_stop(pnv_default_stop_val, pnv_default_stop_mask); } + /* * First deep stop state. Used to figure out when to save/restore * hypervisor context. @@ -253,9 +259,11 @@ static void power9_idle(void) u64 pnv_first_deep_stop_state = MAX_STOP_STATE; /* - * Deepest stop idle state. Used when a cpu is offlined + * psscr value and mask of the deepest stop idle state. + * Used when a cpu is offlined. */ -u64 pnv_deepest_stop_state; +u64 pnv_deepest_stop_psscr_val; +u64 pnv_deepest_stop_psscr_mask; /* * Power ISA 3.0 idle initialization. @@ -292,53 +300,157 @@ u64 pnv_deepest_stop_state; * Bits 60:63 - Requested Level * Used to specify which power-saving level must be entered on executing * stop instruction + */ + +int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags) +{ + int err = 0; + + /* + * psscr_mask == 0xf indicates an older firmware. + * Set remaining fields of psscr to the default values. + * See NOTE above definition of PSSCR_HV_DEFAULT_VAL + */ + if (*psscr_mask == 0xf) { + *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL; + *psscr_mask = PSSCR_HV_DEFAULT_MASK; + return err; + } + + /* + * New firmware is expected to set the psscr_val bits correctly. + * Validate that the following invariants are correctly maintained by + * the new firmware. + * - ESL bit value matches the EC bit value. + * - ESL bit is set for all the deep stop states. + */ + if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) { + err = ERR_EC_ESL_MISMATCH; + } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) && + GET_PSSCR_ESL(*psscr_val) == 0) { + err = ERR_DEEP_STATE_ESL_MISMATCH; + } + + return err; +} + +/* + * pnv_arch300_idle_init: Initializes the default idle state, first + * deep idle state and deepest idle state on + * ISA 3.0 CPUs. 
* * @np: /ibm,opal/power-mgt device node * @flags: cpu-idle-state-flags array * @dt_idle_states: Number of idle state entries * Returns 0 on success */ -static int __init pnv_arch300_idle_init(struct device_node *np, u32 *flags, +static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags, int dt_idle_states) { u64 *psscr_val = NULL; + u64 *psscr_mask = NULL; + u32 *residency_ns = NULL; + u64 max_residency_ns = 0; int rc = 0, i; + bool default_stop_found = false, deepest_stop_found = false; - psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), - GFP_KERNEL); - if (!psscr_val) { + psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL); + psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL); + residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns), + GFP_KERNEL); + + if (!psscr_val || !psscr_mask || !residency_ns) { rc = -1; goto out; } + if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) { - pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n"); + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n"); + rc = -1; + goto out; + } + + if (of_property_read_u64_array(np, + "ibm,cpu-idle-state-psscr-mask", + psscr_mask, dt_idle_states)) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n"); + rc = -1; + goto out; + } + + if (of_property_read_u32_array(np, + "ibm,cpu-idle-state-residency-ns", + residency_ns, dt_idle_states)) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n"); rc = -1; goto out; } /* - * Set pnv_first_deep_stop_state and pnv_deepest_stop_state. + * Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask}, + * and the pnv_default_stop_{val,mask}. + * * pnv_first_deep_stop_state should be set to the first stop * level to cause hypervisor state loss. - * pnv_deepest_stop_state should be set to the deepest stop - * stop state. + * + * pnv_deepest_stop_{val,mask} should be set to values corresponding to + * the deepest stop state. + * + * pnv_default_stop_{val,mask} should be set to values corresponding to + * the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state. 
*/ pnv_first_deep_stop_state = MAX_STOP_STATE; for (i = 0; i < dt_idle_states; i++) { + int err; u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK; if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) && (pnv_first_deep_stop_state > psscr_rl)) pnv_first_deep_stop_state = psscr_rl; - if (pnv_deepest_stop_state < psscr_rl) - pnv_deepest_stop_state = psscr_rl; + err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i], + flags[i]); + if (err) { + report_invalid_psscr_val(psscr_val[i], err); + continue; + } + + if (max_residency_ns < residency_ns[i]) { + max_residency_ns = residency_ns[i]; + pnv_deepest_stop_psscr_val = psscr_val[i]; + pnv_deepest_stop_psscr_mask = psscr_mask[i]; + deepest_stop_found = true; + } + + if (!default_stop_found && + (flags[i] & OPAL_PM_STOP_INST_FAST)) { + pnv_default_stop_val = psscr_val[i]; + pnv_default_stop_mask = psscr_mask[i]; + default_stop_found = true; + } + } + + if (!default_stop_found) { + pnv_default_stop_val = PSSCR_HV_DEFAULT_VAL; + pnv_default_stop_mask = PSSCR_HV_DEFAULT_MASK; + pr_warn("Setting default stop psscr val=0x%016llx,mask=0x%016llx\n", + pnv_default_stop_val, pnv_default_stop_mask); + } + + if (!deepest_stop_found) { + pnv_deepest_stop_psscr_val = PSSCR_HV_DEFAULT_VAL; + pnv_deepest_stop_psscr_mask = PSSCR_HV_DEFAULT_MASK; + pr_warn("Setting default stop psscr val=0x%016llx,mask=0x%016llx\n", + pnv_deepest_stop_psscr_val, + pnv_deepest_stop_psscr_mask); } out: kfree(psscr_val); + kfree(psscr_mask); + kfree(residency_ns); return rc; } @@ -373,7 +485,7 @@ static void __init pnv_probe_idle_states(void) } if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (pnv_arch300_idle_init(np, flags, dt_idle_states)) + if (pnv_power9_idle_init(np, flags, dt_idle_states)) goto out; } diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index c0a8201cb4d9..88f3c61eec95 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -180,7 +180,8 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) "An XSCOM operation completed", "SCOM has set a reserved FIR bit to cause recovery", "Debug trigger has set a reserved FIR bit to cause recovery", - "A hypervisor resource error occurred" + "A hypervisor resource error occurred", + "CAPP recovery process is in progress", }; /* Print things out */ diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 998316bf2dad..ecdcba9d1220 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -183,8 +183,9 @@ void opal_event_shutdown(void) int __init opal_event_init(void) { struct device_node *dn, *opal_node; - const __be32 *irqs; - int i, irqlen, rc = 0; + const char **names; + u32 *irqs; + int i, rc; opal_node = of_find_node_by_path("/ibm,opal"); if (!opal_node) { @@ -209,31 +210,56 @@ int __init opal_event_init(void) goto out; } - /* Get interrupt property */ - irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); - opal_irq_count = irqs ? 
(irqlen / 4) : 0; + /* Get opal-interrupts property and names if present */ + rc = of_property_count_u32_elems(opal_node, "opal-interrupts"); + if (rc < 0) + goto out; + + opal_irq_count = rc; pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count); - /* Install interrupt handlers */ + irqs = kcalloc(opal_irq_count, sizeof(*irqs), GFP_KERNEL); + names = kcalloc(opal_irq_count, sizeof(*names), GFP_KERNEL); opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL); - for (i = 0; irqs && i < opal_irq_count; i++, irqs++) { - unsigned int irq, virq; + + if (WARN_ON(!irqs || !names || !opal_irqs)) + goto out_free; + + rc = of_property_read_u32_array(opal_node, "opal-interrupts", + irqs, opal_irq_count); + if (rc < 0) { + pr_err("Error %d reading opal-interrupts array\n", rc); + goto out_free; + } + + /* It's not an error for the names to be missing */ + of_property_read_string_array(opal_node, "opal-interrupts-names", + names, opal_irq_count); + + /* Install interrupt handlers */ + for (i = 0; i < opal_irq_count; i++) { + unsigned int virq; + char *name; /* Get hardware and virtual IRQ */ - irq = be32_to_cpup(irqs); - virq = irq_create_mapping(NULL, irq); + virq = irq_create_mapping(NULL, irqs[i]); if (!virq) { - pr_warn("Failed to map irq 0x%x\n", irq); + pr_warn("Failed to map irq 0x%x\n", irqs[i]); continue; } + if (names[i] && strlen(names[i])) + name = kasprintf(GFP_KERNEL, "opal-%s", names[i]); + else + name = kasprintf(GFP_KERNEL, "opal"); + /* Install interrupt handler */ rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW, - "opal", NULL); + name, NULL); if (rc) { irq_dispose_mapping(virq); pr_warn("Error %d requesting irq %d (0x%x)\n", - rc, virq, irq); + rc, virq, irqs[i]); continue; } @@ -241,6 +267,9 @@ int __init opal_event_init(void) opal_irqs[i] = virq; } +out_free: + kfree(irqs); + kfree(names); out: of_node_put(opal_node); return rc; diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index 4886eb8b6381..a91d7876fae2 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -18,11 +18,11 @@ #include <asm/machdep.h> #include <asm/firmware.h> -#include <asm/xics.h> #include <asm/opal.h> #include <asm/prom.h> #include <linux/uaccess.h> #include <asm/debug.h> +#include <asm/isa-bridge.h> static int opal_lpc_chip_id = -1; @@ -386,7 +386,7 @@ static int opal_lpc_init_debugfs(void) machine_device_initcall(powernv, opal_lpc_init_debugfs); #endif /* CONFIG_DEBUG_FS */ -void opal_lpc_init(void) +void __init opal_lpc_init(void) { struct device_node *np; @@ -406,9 +406,17 @@ void opal_lpc_init(void) if (opal_lpc_chip_id < 0) return; - /* Setup special IO ops */ - ppc_pci_io = opal_lpc_io; - isa_io_special = true; - - pr_info("OPAL: Power8 LPC bus found, chip ID %d\n", opal_lpc_chip_id); + /* Does it support direct mapping ? 
*/ + if (of_get_property(np, "ranges", NULL)) { + pr_info("OPAL: Found memory mapped LPC bus on chip %d\n", + opal_lpc_chip_id); + isa_bridge_init_non_pci(np); + } else { + pr_info("OPAL: Found non-mapped LPC bus on chip %d\n", + opal_lpc_chip_id); + + /* Setup special IO ops */ + ppc_pci_io = opal_lpc_io; + isa_io_special = true; + } } diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 39d6ff9e5630..7a9cde0cfbd1 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -123,6 +123,10 @@ void __init opal_msglog_init(void) return; } + /* Report maximum size */ + opal_msglog_attr.size = be32_to_cpu(mc->ibuf_size) + + be32_to_cpu(mc->obuf_size); + opal_memcons = mc; } diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 3aa40f1b20f5..6693f75e93d1 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -58,14 +58,16 @@ END_FTR_SECTION(0, 1); \ #define OPAL_CALL(name, token) \ _GLOBAL_TOC(name); \ + mfmsr r12; \ mflr r0; \ + andi. r11,r12,MSR_IR|MSR_DR; \ std r0,PPC_LR_STKOFF(r1); \ li r0,token; \ + beq opal_real_call; \ OPAL_BRANCH(opal_tracepoint_entry) \ - mfcr r12; \ - stw r12,8(r1); \ + mfcr r11; \ + stw r11,8(r1); \ li r11,0; \ - mfmsr r12; \ ori r11,r11,MSR_EE; \ std r12,PACASAVEDMSR(r13); \ andc r12,r12,r11; \ @@ -98,6 +100,30 @@ opal_return: mtcr r4; rfid +opal_real_call: + mfcr r11 + stw r11,8(r1) + /* Set opal return address */ + LOAD_REG_ADDR(r11, opal_return_realmode) + mtlr r11 + li r11,MSR_LE + andc r12,r12,r11 + mtspr SPRN_HSRR1,r12 + LOAD_REG_ADDR(r11,opal) + ld r12,8(r11) + ld r2,0(r11) + mtspr SPRN_HSRR0,r12 + hrfid + +opal_return_realmode: + FIXUP_ENDIAN + ld r2,PACATOC(r13); + lwz r11,8(r1); + ld r12,PPC_LR_STKOFF(r1) + mtcr r11; + mtlr r12 + blr + #ifdef CONFIG_TRACEPOINTS opal_tracepoint_entry: stdu r1,-STACKFRAMESIZE(r1) @@ -146,7 +172,7 @@ opal_tracepoint_entry: opal_tracepoint_return: std r3,STK_REG(R31)(r1) mr r4,r3 - ld r0,STK_REG(R23)(r1) + ld r3,STK_REG(R23)(r1) bl __trace_opal_exit ld r3,STK_REG(R31)(r1) addi r1,r1,STACKFRAMESIZE @@ -155,36 +181,6 @@ opal_tracepoint_return: blr #endif -#define OPAL_CALL_REAL(name, token) \ - _GLOBAL_TOC(name); \ - mflr r0; \ - std r0,PPC_LR_STKOFF(r1); \ - li r0,token; \ - mfcr r12; \ - stw r12,8(r1); \ - \ - /* Set opal return address */ \ - LOAD_REG_ADDR(r11, opal_return_realmode); \ - mtlr r11; \ - mfmsr r12; \ - li r11,MSR_LE; \ - andc r12,r12,r11; \ - mtspr SPRN_HSRR1,r12; \ - LOAD_REG_ADDR(r11,opal); \ - ld r12,8(r11); \ - ld r2,0(r11); \ - mtspr SPRN_HSRR0,r12; \ - hrfid - -opal_return_realmode: - FIXUP_ENDIAN - ld r2,PACATOC(r13); - lwz r11,8(r1); - ld r12,PPC_LR_STKOFF(r1) - mtcr r11; - mtlr r12 - blr - OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL); OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE); @@ -208,7 +204,6 @@ OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE); OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD); OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD); OPAL_CALL(opal_set_xive, OPAL_SET_XIVE); -OPAL_CALL_REAL(opal_rm_set_xive, OPAL_SET_XIVE); OPAL_CALL(opal_get_xive, OPAL_GET_XIVE); OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER); OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS); @@ -264,7 +259,6 @@ OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); 
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); -OPAL_CALL_REAL(opal_rm_resync_timebase, OPAL_RESYNC_TIMEBASE); OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN); OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); @@ -280,9 +274,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ); OPAL_CALL(opal_get_param, OPAL_GET_PARAM); OPAL_CALL(opal_set_param, OPAL_SET_PARAM); OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); -OPAL_CALL_REAL(opal_rm_handle_hmi, OPAL_HANDLE_HMI); OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE); -OPAL_CALL_REAL(opal_rm_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE); OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG); OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); @@ -304,11 +296,8 @@ OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE); OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE); OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE); OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR); -OPAL_CALL_REAL(opal_rm_int_get_xirr, OPAL_INT_GET_XIRR); OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR); OPAL_CALL(opal_int_eoi, OPAL_INT_EOI); -OPAL_CALL_REAL(opal_rm_int_eoi, OPAL_INT_EOI); OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR); -OPAL_CALL_REAL(opal_rm_int_set_mfrr, OPAL_INT_SET_MFRR); OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL); -OPAL_CALL_REAL(opal_rm_pci_tce_kill, OPAL_PCI_TCE_KILL); +OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 282293572dc8..86d9fde93c17 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -875,6 +875,17 @@ int opal_error_code(int rc) } } +void powernv_set_nmmu_ptcr(unsigned long ptcr) +{ + int rc; + + if (firmware_has_feature(FW_FEATURE_OPAL)) { + rc = opal_nmmu_set_ptcr(-1UL, ptcr); + if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED) + pr_warn("%s: Unable to set nest mmu ptcr\n", __func__); + } +} + EXPORT_SYMBOL_GPL(opal_poll_events); EXPORT_SYMBOL_GPL(opal_rtc_read); EXPORT_SYMBOL_GPL(opal_rtc_write); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index b07680cd2518..8278f43ad4b8 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1326,7 +1326,9 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) else m64_bars = 1; - pdn->m64_map = kmalloc(sizeof(*pdn->m64_map) * m64_bars, GFP_KERNEL); + pdn->m64_map = kmalloc_array(m64_bars, + sizeof(*pdn->m64_map), + GFP_KERNEL); if (!pdn->m64_map) return -ENOMEM; /* Initialize the m64_map to IODA_INVALID_M64 */ @@ -1593,8 +1595,9 @@ int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) /* Allocating pe_num_map */ if (pdn->m64_single_mode) - pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs, - GFP_KERNEL); + pdn->pe_num_map = kmalloc_array(num_vfs, + sizeof(*pdn->pe_num_map), + GFP_KERNEL); else pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL); @@ -1950,7 +1953,12 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, struct pnv_phb *phb = pe->phb; unsigned int shift = tbl->it_page_shift; - if (phb->type == PNV_PHB_NPU) { + /* + * NVLink1 can use the TCE kill register directly as + * it's the same as PHB3. 
NVLink2 is different and + * should go via the OPAL call. + */ + if (phb->model == PNV_PHB_MODEL_NPU) { /* * The NVLink hardware does not support TCE kill * per TCE entry so we have to invalidate @@ -1962,11 +1970,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) pnv_pci_phb3_tce_invalidate(pe, rm, shift, index, npages); - else if (rm) - opal_rm_pci_tce_kill(phb->opal_id, - OPAL_PCI_TCE_KILL_PAGES, - pe->pe_number, 1u << shift, - index << shift, npages); else opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PAGES, @@ -3671,6 +3674,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->model = PNV_PHB_MODEL_PHB3; else if (of_device_is_compatible(np, "ibm,power8-npu-pciex")) phb->model = PNV_PHB_MODEL_NPU; + else if (of_device_is_compatible(np, "ibm,power9-npu-pciex")) + phb->model = PNV_PHB_MODEL_NPU2; else phb->model = PNV_PHB_MODEL_UNKNOWN; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index c6d554fe585c..eb835e977e33 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -940,6 +940,13 @@ void __init pnv_pci_init(void) for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb") pnv_pci_init_npu_phb(np); + /* + * Look for NPU2 PHBs which we treat mostly as NPU PHBs with + * the exception of TCE kill which requires an OPAL call. + */ + for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb") + pnv_pci_init_npu_phb(np); + /* Configure IOMMU DMA hooks */ set_pci_dma_ops(&dma_iommu_ops); } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index e64df7894d6e..e1d3e5526b54 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -19,6 +19,7 @@ enum pnv_phb_model { PNV_PHB_MODEL_P7IOC, PNV_PHB_MODEL_PHB3, PNV_PHB_MODEL_NPU, + PNV_PHB_MODEL_NPU2, }; #define PNV_PCI_DIAG_BUF_SIZE 8192 diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index da7c843ac7f1..613052232475 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -18,7 +18,8 @@ static inline void pnv_pci_shutdown(void) { } #endif extern u32 pnv_get_supported_cpuidle_states(void); -extern u64 pnv_deepest_stop_state; +extern u64 pnv_deepest_stop_psscr_val; +extern u64 pnv_deepest_stop_psscr_mask; extern void pnv_lpc_init(void); diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index eec0e8d0454d..e39e6c428af1 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -184,15 +184,17 @@ static void pnv_smp_cpu_kill_self(void) ppc64_runlatch_off(); - if (cpu_has_feature(CPU_FTR_ARCH_300)) - srr1 = power9_idle_stop(pnv_deepest_stop_state); - else if (idle_states & OPAL_PM_WINKLE_ENABLED) + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val, + pnv_deepest_stop_psscr_mask); + } else if (idle_states & OPAL_PM_WINKLE_ENABLED) { srr1 = power7_winkle(); - else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || - (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) + } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || + (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { srr1 = power7_sleep(); - else + } else { srr1 = power7_nap(1); + } ppc64_runlatch_on(); diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index e1c280a95d58..30ec04f1c67c 100644 --- 
a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -17,7 +17,6 @@ config PPC_PSERIES select PPC_UDBG_16550 select PPC_NATIVE select PPC_DOORBELL - select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP select ARCH_RANDOM select PPC_DOORBELL diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 4839db385bb0..4ac419c7eb4c 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -76,7 +76,7 @@ module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. " "[Default=" __stringify(CMM_DEFAULT_DELAY) "]"); module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove " +MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove " "before loaning resumes. " "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]"); module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR); diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 5cb2e4beffc5..d3a81e746fc4 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -551,7 +551,13 @@ dlpar_store_out: return rc ? rc : count; } -static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store); +static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", "memory,cpu"); +} + +static CLASS_ATTR(dlpar, S_IWUSR | S_IRUSR, dlpar_show, dlpar_store); static int __init pseries_dlpar_init(void) { diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index ea7f09bd73b1..63cc82ad58ac 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -64,6 +64,7 @@ hypertas_fw_features_table[] = { {FW_FEATURE_VPHN, "hcall-vphn"}, {FW_FEATURE_SET_MODE, "hcall-set-mode"}, {FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"}, + {FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"}, }; /* Build up the firmware features bitmask using the contents of @@ -126,7 +127,7 @@ static void __init fw_vec5_feature_init(const char *vec5, unsigned long len) index = OV5_INDX(vec5_fw_features_table[i].feature); feat = OV5_FEAT(vec5_fw_features_table[i].feature); - if (vec5[index] & feat) + if (index < len && (vec5[index] & feat)) powerpc_firmware_features |= vec5_fw_features_table[i].val; } diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2617f9f356bd..3381c20edbc0 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -446,9 +446,7 @@ static int dlpar_remove_lmb(struct of_drconf_cell *lmb) /* Update memory regions for memory remove */ memblock_remove(lmb->base_addr, block_sz); - dlpar_release_drc(lmb->drc_index); dlpar_remove_device_tree_lmb(lmb); - return 0; } @@ -516,6 +514,7 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove, if (!lmbs[i].reserved) continue; + dlpar_release_drc(lmbs[i].drc_index); pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr); @@ -545,6 +544,9 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop) if (lmbs[i].drc_index == drc_index) { lmb_found = 1; rc = dlpar_remove_lmb(&lmbs[i]); + if (!rc) + dlpar_release_drc(lmbs[i].drc_index); + break; } } @@ -561,6 +563,44 @@ static int 
dlpar_memory_remove_by_index(u32 drc_index, struct property *prop) return rc; } +static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop) +{ + struct of_drconf_cell *lmbs; + u32 num_lmbs, *p; + int lmb_found; + int i, rc; + + pr_info("Attempting to update LMB, drc index %x\n", drc_index); + + p = prop->value; + num_lmbs = *p++; + lmbs = (struct of_drconf_cell *)p; + + lmb_found = 0; + for (i = 0; i < num_lmbs; i++) { + if (lmbs[i].drc_index == drc_index) { + lmb_found = 1; + rc = dlpar_remove_lmb(&lmbs[i]); + if (!rc) { + rc = dlpar_add_lmb(&lmbs[i]); + if (rc) + dlpar_release_drc(lmbs[i].drc_index); + } + break; + } + } + + if (!lmb_found) + rc = -EINVAL; + + if (rc) + pr_info("Failed to update memory at %llx\n", + lmbs[i].base_addr); + else + pr_info("Memory at %llx was updated\n", lmbs[i].base_addr); + + return rc; +} #else static inline int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) @@ -599,10 +639,6 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) if (lmb->flags & DRCONF_MEM_ASSIGNED) return -EINVAL; - rc = dlpar_acquire_drc(lmb->drc_index); - if (rc) - return rc; - rc = dlpar_add_device_tree_lmb(lmb); if (rc) { pr_err("Couldn't update device tree for drc index %x\n", @@ -618,12 +654,10 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) /* Add the memory */ rc = add_memory(nid, lmb->base_addr, block_sz); - if (rc) { + if (rc) dlpar_remove_device_tree_lmb(lmb); - dlpar_release_drc(lmb->drc_index); - } else { + else lmb->flags |= DRCONF_MEM_ASSIGNED; - } return rc; } @@ -655,10 +689,16 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) return -EINVAL; for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) { - rc = dlpar_add_lmb(&lmbs[i]); + rc = dlpar_acquire_drc(lmbs[i].drc_index); if (rc) continue; + rc = dlpar_add_lmb(&lmbs[i]); + if (rc) { + dlpar_release_drc(lmbs[i].drc_index); + continue; + } + lmbs_added++; /* Mark this lmb so we can remove it later if all of the @@ -678,6 +718,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) if (rc) pr_err("Failed to remove LMB, drc index %x\n", be32_to_cpu(lmbs[i].drc_index)); + else + dlpar_release_drc(lmbs[i].drc_index); } rc = -EINVAL; } else { @@ -711,7 +753,13 @@ static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop) for (i = 0; i < num_lmbs; i++) { if (lmbs[i].drc_index == drc_index) { lmb_found = 1; - rc = dlpar_add_lmb(&lmbs[i]); + rc = dlpar_acquire_drc(lmbs[i].drc_index); + if (!rc) { + rc = dlpar_add_lmb(&lmbs[i]); + if (rc) + dlpar_release_drc(lmbs[i].drc_index); + } + break; } } @@ -769,6 +817,9 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) else rc = -EINVAL; break; + case PSERIES_HP_ELOG_ACTION_READD: + rc = dlpar_memory_readd_by_index(drc_index, prop); + break; default: pr_err("Invalid action (%d) specified\n", hp_elog->action); rc = -EINVAL; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5dc1c3c6e716..251060cf1713 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -27,6 +27,8 @@ #include <linux/console.h> #include <linux/export.h> #include <linux/jump_label.h> +#include <linux/delay.h> +#include <linux/stop_machine.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/page.h> @@ -609,6 +611,135 @@ static int __init disable_bulk_remove(char *str) __setup("bulk_remove=", disable_bulk_remove); +#define HPT_RESIZE_TIMEOUT 10000 /* ms */ + +struct hpt_resize_state { + 
unsigned long shift; + int commit_rc; +}; + +static int pseries_lpar_resize_hpt_commit(void *data) +{ + struct hpt_resize_state *state = data; + + state->commit_rc = plpar_resize_hpt_commit(0, state->shift); + if (state->commit_rc != H_SUCCESS) + return -EIO; + + /* Hypervisor has transitioned the HTAB, update our globals */ + ppc64_pft_size = state->shift; + htab_size_bytes = 1UL << ppc64_pft_size; + htab_hash_mask = (htab_size_bytes >> 7) - 1; + + return 0; +} + +/* Must be called in user context */ +static int pseries_lpar_resize_hpt(unsigned long shift) +{ + struct hpt_resize_state state = { + .shift = shift, + .commit_rc = H_FUNCTION, + }; + unsigned int delay, total_delay = 0; + int rc; + ktime_t t0, t1, t2; + + might_sleep(); + + if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE)) + return -ENODEV; + + printk(KERN_INFO "lpar: Attempting to resize HPT to shift %lu\n", + shift); + + t0 = ktime_get(); + + rc = plpar_resize_hpt_prepare(0, shift); + while (H_IS_LONG_BUSY(rc)) { + delay = get_longbusy_msecs(rc); + total_delay += delay; + if (total_delay > HPT_RESIZE_TIMEOUT) { + /* prepare with shift==0 cancels an in-progress resize */ + rc = plpar_resize_hpt_prepare(0, 0); + if (rc != H_SUCCESS) + printk(KERN_WARNING + "lpar: Unexpected error %d cancelling timed out HPT resize\n", + rc); + return -ETIMEDOUT; + } + msleep(delay); + rc = plpar_resize_hpt_prepare(0, shift); + }; + + switch (rc) { + case H_SUCCESS: + /* Continue on */ + break; + + case H_PARAMETER: + return -EINVAL; + case H_RESOURCE: + return -EPERM; + default: + printk(KERN_WARNING + "lpar: Unexpected error %d from H_RESIZE_HPT_PREPARE\n", + rc); + return -EIO; + } + + t1 = ktime_get(); + + rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL); + + t2 = ktime_get(); + + if (rc != 0) { + switch (state.commit_rc) { + case H_PTEG_FULL: + printk(KERN_WARNING + "lpar: Hash collision while resizing HPT\n"); + return -ENOSPC; + + default: + printk(KERN_WARNING + "lpar: Unexpected error %d from H_RESIZE_HPT_COMMIT\n", + state.commit_rc); + return -EIO; + }; + } + + printk(KERN_INFO + "lpar: HPT resize to shift %lu complete (%lld ms / %lld ms)\n", + shift, (long long) ktime_ms_delta(t1, t0), + (long long) ktime_ms_delta(t2, t1)); + + return 0; +} + +/* Actually only used for radix, so far */ +static int pseries_lpar_register_process_table(unsigned long base, + unsigned long page_size, unsigned long table_size) +{ + long rc; + unsigned long flags = PROC_TABLE_NEW; + + if (radix_enabled()) + flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; + for (;;) { + rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, + page_size, table_size); + if (!H_IS_LONG_BUSY(rc)) + break; + mdelay(get_longbusy_msecs(rc)); + } + if (rc != H_SUCCESS) { + pr_err("Failed to register process table (rc=%ld)\n", rc); + BUG(); + } + return rc; +} + void __init hpte_init_pseries(void) { mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate; @@ -620,6 +751,13 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; + mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; +} + +void radix_init_pseries(void) +{ + pr_info("Using radix MMU under hypervisor\n"); + register_process_table = pseries_lpar_register_process_table; } #ifdef CONFIG_PPC_SMLPAR diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index a560a98bcf3b..5a0c7ba429ce 100644 --- 
a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -39,6 +39,7 @@ struct update_props_workarea { #define ADD_DT_NODE 0x03000000 #define MIGRATION_SCOPE (1) +#define PRRN_SCOPE -2 static int mobility_rtas_call(int token, char *buf, s32 scope) { @@ -236,6 +237,35 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) return rc; } +static void prrn_update_node(__be32 phandle) +{ + struct pseries_hp_errorlog *hp_elog; + struct device_node *dn; + + /* + * If a node is found from a the given phandle, the phandle does not + * represent the drc index of an LMB and we can ignore. + */ + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); + if (dn) { + of_node_put(dn); + return; + } + + hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL); + if(!hp_elog) + return; + + hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; + hp_elog->action = PSERIES_HP_ELOG_ACTION_READD; + hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; + hp_elog->_drc_u.drc_index = phandle; + + queue_hotplug_event(hp_elog, NULL, NULL); + + kfree(hp_elog); +} + int pseries_devicetree_update(s32 scope) { char *rtas_buf; @@ -274,6 +304,10 @@ int pseries_devicetree_update(s32 scope) break; case UPDATE_DT_NODE: update_dt_node(phandle, scope); + + if (scope == PRRN_SCOPE) + prrn_update_node(phandle); + break; case ADD_DT_NODE: drc_index = *data++; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 7736352f7279..b4d362ed03a1 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -66,6 +66,7 @@ #include <asm/reg.h> #include <asm/plpar_wrappers.h> #include <asm/kexec.h> +#include <asm/isa-bridge.h> #include "pseries.h" |
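
The idle.c hunk above stops tracking a bare stop level and instead records full PSSCR value/mask pairs, validating what firmware supplies and choosing both a default and a deepest stop state. The standalone sketch below (plain user-space C; the field masks, flag bits and sample device-tree values are illustrative stand-ins for the kernel's definitions) mirrors that selection: reject entries whose EC and ESL bits disagree or whose deep state clears ESL, take the state with the longest residency as the deepest, and take the first OPAL_PM_STOP_INST_FAST state as the default.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative field masks mirroring the kernel's PSSCR layout (assumption). */
#define PSSCR_RL_MASK  0x0000000FULL   /* Requested Level */
#define PSSCR_EC       0x00100000ULL   /* Exit Criterion */
#define PSSCR_ESL      0x00200000ULL   /* Enable State Loss */

/* Hypothetical stand-ins for the OPAL idle-state flag bits used in the patch. */
#define PM_LOSE_FULL_CONTEXT  0x1
#define PM_STOP_INST_FAST     0x2

static bool psscr_valid(uint64_t val, uint32_t flags)
{
	bool esl = val & PSSCR_ESL, ec = val & PSSCR_EC;

	if (esl != ec)
		return false;			/* EC/ESL mismatch */
	if ((flags & PM_LOSE_FULL_CONTEXT) && !esl)
		return false;			/* deep state must set ESL */
	return true;
}

int main(void)
{
	/* Sample per-state data as it might appear in the device tree. */
	uint64_t psscr_val[] = { 0x00000000, 0x00300001, 0x00300003 };
	uint32_t flags[]     = { PM_STOP_INST_FAST, PM_STOP_INST_FAST,
				 PM_LOSE_FULL_CONTEXT };
	uint32_t residency[] = { 100, 5000, 200000 };	/* ns */
	uint64_t deepest = 0, dflt = 0, max_res = 0;
	bool have_default = false;

	for (int i = 0; i < 3; i++) {
		if (!psscr_valid(psscr_val[i], flags[i]))
			continue;		/* skip invalid firmware entries */
		if (residency[i] > max_res) {	/* deepest = longest residency */
			max_res = residency[i];
			deepest = psscr_val[i];
		}
		if (!have_default && (flags[i] & PM_STOP_INST_FAST)) {
			dflt = psscr_val[i];	/* first shallow, loss-less state */
			have_default = true;
		}
	}
	printf("default RL=%llu deepest RL=%llu\n",
	       (unsigned long long)(dflt & PSSCR_RL_MASK),
	       (unsigned long long)(deepest & PSSCR_RL_MASK));
	return 0;
}
```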
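
pseries_lpar_resize_hpt() in the lpar.c hunk retries H_RESIZE_HPT_PREPARE while the hypervisor returns a long-busy status and, if the retries run past a timeout, cancels the operation by calling prepare again with shift 0 before giving up; the commit step then runs under stop_machine(). This user-space sketch approximates only the retry/cancel pattern; the hcall, the long-busy decoding and the timing constants are stubs, not the kernel API.

```c
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

#define H_SUCCESS          0
#define H_LONG_BUSY        9902		/* illustrative long-busy return code */
#define RESIZE_TIMEOUT_MS  10000

/* Stub standing in for the H_RESIZE_HPT_PREPARE hcall. */
static int fake_resize_prepare(unsigned long shift)
{
	static int calls;

	if (shift == 0)
		return H_SUCCESS;	/* shift 0 cancels an in-flight resize */
	return (++calls < 3) ? H_LONG_BUSY : H_SUCCESS;
}

static bool is_long_busy(int rc) { return rc == H_LONG_BUSY; }
static unsigned int longbusy_msecs(int rc) { (void)rc; return 10; }

static int resize_hpt(unsigned long shift)
{
	unsigned int delay, total = 0;
	int rc;

	rc = fake_resize_prepare(shift);
	while (is_long_busy(rc)) {
		delay = longbusy_msecs(rc);
		total += delay;
		if (total > RESIZE_TIMEOUT_MS) {
			/* Give up: prepare with shift == 0 cancels the resize. */
			fake_resize_prepare(0);
			return -1;
		}
		usleep(delay * 1000);
		rc = fake_resize_prepare(shift);
	}
	if (rc != H_SUCCESS)
		return -1;

	/*
	 * In the kernel the commit step then runs under stop_machine() so no
	 * other CPU touches the hash table while it is being swapped.
	 */
	printf("prepare for shift %lu done after ~%u ms of retries\n",
	       shift, total);
	return 0;
}

int main(void)
{
	return resize_hpt(27) ? 1 : 0;
}
```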
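
The hotplug-memory.c changes pull dlpar_acquire_drc()/dlpar_release_drc() out of dlpar_add_lmb() and dlpar_remove_lmb() so the new PSERIES_HP_ELOG_ACTION_READD path can remove and re-add an LMB without surrendering its DRC in between, releasing it only if the re-add fails. A minimal sketch of that ordering, with hypothetical stand-in helpers rather than the real DLPAR functions:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the DLPAR helpers referenced in the patch. */
static int remove_lmb(unsigned int drc)   { printf("remove LMB %x\n", drc); return 0; }
static int add_lmb(unsigned int drc)      { printf("add LMB %x\n", drc); return 0; }
static void release_drc(unsigned int drc) { printf("release DRC %x\n", drc); }

/*
 * READD = remove then re-add the same LMB while keeping ownership of its
 * DRC; the DRC is handed back only if the re-add fails, mirroring
 * dlpar_memory_readd_by_index() in the hunk above.
 */
static int readd_lmb(unsigned int drc)
{
	int rc = remove_lmb(drc);

	if (rc)
		return rc;		/* nothing removed, keep the DRC */

	rc = add_lmb(drc);
	if (rc)
		release_drc(drc);	/* re-add failed: give the DRC back */
	return rc;
}

int main(void)
{
	return readd_lmb(0x80000010);
}
```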
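
The opal-irqchip.c hunk reads the optional "opal-interrupts-names" property and registers each handler under "opal-<name>", falling back to plain "opal" when a name is absent. A small user-space sketch of the same naming fallback, using asprintf() in place of kasprintf() and a sample name table in place of the device-tree property:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Sample/partial names standing in for "opal-interrupts-names". */
	const char *names[] = { "hmi", NULL, "" };
	unsigned int irqs[] = { 0x10, 0x11, 0x12 };

	for (int i = 0; i < 3; i++) {
		char *label;
		int rc;

		/* Named interrupts become "opal-<name>", the rest stay "opal". */
		if (names[i] && strlen(names[i]))
			rc = asprintf(&label, "opal-%s", names[i]);
		else
			rc = asprintf(&label, "opal");
		if (rc < 0)
			return 1;

		printf("irq 0x%x -> request_irq name \"%s\"\n", irqs[i], label);
		free(label);
	}
	return 0;
}
```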
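
The opal-wrappers.S rework lets the single OPAL_CALL entry test MSR_IR|MSR_DR and branch to opal_real_call when translation is off, which is what makes the OPAL_CALL_REAL/opal_rm_* duplicates removable. The C fragment below only illustrates that dispatch decision; the MSR bit positions follow the usual Power layout, but this is a sketch of the logic, not the assembly path.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* MSR translation bits (Power ISA layout; treat as illustrative here). */
#define MSR_IR (1UL << 5)	/* instruction relocation */
#define MSR_DR (1UL << 4)	/* data relocation */

/*
 * The reworked OPAL_CALL macro checks the caller's MSR once and picks the
 * entry path itself; this mimics that check in C.
 */
static bool use_real_mode_entry(uint64_t msr)
{
	return (msr & (MSR_IR | MSR_DR)) == 0;	/* translation off => real mode */
}

int main(void)
{
	printf("MSR 0x0   -> %s path\n",
	       use_real_mode_entry(0) ? "real-mode" : "virtual-mode");
	printf("MSR IR|DR -> %s path\n",
	       use_real_mode_entry(MSR_IR | MSR_DR) ? "real-mode" : "virtual-mode");
	return 0;
}
```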