Diffstat (limited to 'arch/powerpc/sysdev/xive')
arch/powerpc/sysdev/xive/common.c | 30 ++++--------------------------
arch/powerpc/sysdev/xive/native.c | 34 +++++++++++++---------------------
2 files changed, 17 insertions(+), 47 deletions(-)
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 3459015092fa..e8f5b0551095 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -319,7 +319,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* The FW told us to call it. This happens for some
* interrupt sources that need additional HW whacking
* beyond the ESB manipulation. For example LPC interrupts
- * on P9 DD1.0 need a latch to be clared in the LPC bridge
+ * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
* itself. The Firmware will take care of it.
*/
if (WARN_ON_ONCE(!xive_ops->eoi))
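For context, a minimal sketch of the firmware-assisted EOI fallback described in this comment, assuming the XIVE_IRQ_FLAG_EOI_FW source flag and the xive_ops->eoi backend hook used elsewhere in this driver; it illustrates the control flow, not the verbatim common.c code:

	/* Some sources need a firmware call rather than (or on top of)
	 * plain ESB MMIO manipulation to complete the EOI. */
	if (xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
		return;
	}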
@@ -337,9 +337,9 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
*
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
+ * For LSIs, the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
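A minimal sketch of the two ESB-based EOI paths the comment above describes, assuming the ESB offsets and PQ masks from asm/xive-regs.h (XIVE_ESB_LOAD_EOI, XIVE_ESB_SET_PQ_00, XIVE_ESB_VAL_Q) and the trig_mmio trigger page in struct xive_irq_data; a sketch of the idea, not the exact upstream code:

	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/* LSI: a load from the LOAD_EOI page performs the EOI, and
		 * HW re-triggers the source if it is still asserted. */
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
	} else {
		/* MSI: set PQ back to 00 and inspect the old Q bit; if an
		 * event was coalesced while the source was masked,
		 * synthesize a re-trigger via the trigger page. */
		u64 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);

		if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
			out_be64(xd->trig_mmio, 0);
	}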
@@ -1408,28 +1408,6 @@ void xive_teardown_cpu(void)
xive_cleanup_cpu_queues(cpu, xc);
}
-void xive_kexec_teardown_cpu(int secondary)
-{
- struct xive_cpu *xc = __this_cpu_read(xive_cpu);
- unsigned int cpu = smp_processor_id();
-
- /* Set CPPR to 0 to disable flow of interrupts */
- xc->cppr = 0;
- out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
-
- /* Backend cleanup if any */
- if (xive_ops->teardown_cpu)
- xive_ops->teardown_cpu(cpu, xc);
-
-#ifdef CONFIG_SMP
- /* Get rid of IPI */
- xive_cleanup_cpu_ipi(cpu, xc);
-#endif
-
- /* Disable and free the queues */
- xive_cleanup_cpu_queues(cpu, xc);
-}
-
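The removed xive_kexec_teardown_cpu() effectively duplicated xive_teardown_cpu() above (its secondary argument was unused), so kexec callers can use the generic helper directly. A hypothetical call site, outside the scope of this diff, with a made-up surrounding function name:

	/* Quiesce this CPU's XIVE state before kexec; sketch only. */
	static void kexec_quiesce_xive(void)
	{
		if (xive_enabled())
			xive_teardown_cpu();	/* was xive_kexec_teardown_cpu() */
	}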
void xive_shutdown(void)
{
xive_ops->shutdown();
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 311185b9960a..5b20a678d755 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -109,7 +109,7 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc == 0 ? 0 : -ENXIO;
}
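Every msleep(1) to msleep(OPAL_BUSY_DELAY_MS) change in this file touches the same retry idiom: re-issue the OPAL call while firmware returns OPAL_BUSY, sleeping between attempts. OPAL_BUSY_DELAY_MS is defined in asm/opal.h (10 ms at the time of this series), so the named constant documents the intended polling interval rather than an arbitrary 1 ms. The shape of the loop, as in the hunk above:

	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);	/* standard OPAL busy-retry delay */
	}
	return rc == 0 ? 0 : -ENXIO;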
@@ -163,7 +163,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Error %lld setting queue for prio %d\n", rc, prio);
@@ -190,7 +190,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
@@ -253,7 +253,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
irq = opal_xive_allocate_irq(chip_id);
if (irq == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
if (irq < 0) {
@@ -275,7 +275,7 @@ u32 xive_native_alloc_irq(void)
rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc < 0)
return 0;
@@ -289,7 +289,7 @@ void xive_native_free_irq(u32 irq)
s64 rc = opal_xive_free_irq(irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
@@ -305,7 +305,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
if (rc == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
xc->hw_ipi = 0;
@@ -395,12 +395,11 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
- pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Failed to enable pool VP on CPU %d\n", cpu);
@@ -415,16 +414,9 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
}
vp_cam = be64_to_cpu(vp_cam_be);
- pr_debug("VP CAM = %llx\n", vp_cam);
-
/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
- pr_debug("(Old HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
- out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
- TM_QW2W2_VP | vp_cam);
- pr_debug("(New HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
+ out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -444,7 +436,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
@@ -645,7 +637,7 @@ u32 xive_native_alloc_vp_block(u32 max_vcpus)
rc = opal_xive_alloc_vp_block(order);
switch (rc) {
case OPAL_BUSY:
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
break;
case OPAL_XIVE_PROVISIONING:
if (!xive_native_provision_pages())
@@ -687,7 +679,7 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
rc = opal_xive_set_vp_info(vp_id, flags, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
@@ -701,7 +693,7 @@ int xive_native_disable_vp(u32 vp_id)
rc = opal_xive_set_vp_info(vp_id, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
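xive_native_enable_vp() and xive_native_disable_vp() are exported helpers (used, for instance, by KVM's XIVE support). A hypothetical caller pairing them around a virtual processor's lifetime; the function name and the trimmed error handling are illustrative only:

	int vp_lifecycle_example(u32 vp_id, bool single_escalation)
	{
		int rc = xive_native_enable_vp(vp_id, single_escalation);
		if (rc)
			return rc;

		/* ... the VP is usable here, e.g. pushed on a vCPU ... */

		return xive_native_disable_vp(vp_id);
	}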