author		Jeremy Kerr <jk@ozlabs.org>		2007-12-20 10:39:59 +0300
committer	Paul Mackerras <paulus@samba.org>	2007-12-21 11:46:20 +0300
commit		d6ad39bc53521275d14fde86bfb94d9b2ddb7a08 (patch)
tree		07dcc592b343395cb7fbfb3053aa21103fb94352 /arch/powerpc
parent		8af30675c3e7b945bbaf6f57b724f246e56eb209 (diff)
download	linux-d6ad39bc53521275d14fde86bfb94d9b2ddb7a08.tar.xz
[POWERPC] spufs: rework class 0 and 1 interrupt handling
Based on original patches from Arnd Bergmann <arnd.bergman@de.ibm.com>
and Luke Browning <lukebr@linux.vnet.ibm.com>.

Currently, spu contexts need to be loaded to the SPU in order to take
class 0 and class 1 exceptions.

This change makes the actual interrupt handlers much simpler (i.e., they
just set the exception information in the context save area), and defers
the handling code to the spufs_handle_class[01] functions, called from
spufs_run_spu. This should improve the concurrency of SPU scheduling,
leading to greater SPU utilization when SPUs are overcommitted.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
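The rework follows a standard top-half/deferred-handling split: the class 0
interrupt handler now only latches the exception state (class_0_pending, dar,
dsisr) into the context save area and wakes anyone sleeping on the context's
stop queue, while the actual handling runs later in process context from
spufs_run_spu. Below is a minimal user-space sketch of that pattern, using
pthreads in place of the kernel's spinlocks and wait queues; all names in it
are illustrative stand-ins, not the kernel's own.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the ctx->csa fields and ctx->stop_wq. */
struct ctx_sketch {
	pthread_mutex_t lock;
	pthread_cond_t stop_wq;
	unsigned long class_0_pending;
	unsigned long dar, dsisr;
};

/* "Top half": record the exception state and wake the run loop; do no
 * handling here. Mirrors the shape of the reworked spu_irq_class_0(). */
static void record_class0(struct ctx_sketch *ctx, unsigned long stat,
			  unsigned long dar, unsigned long dsisr)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->class_0_pending |= stat;
	ctx->dar = dar;
	ctx->dsisr = dsisr;
	pthread_mutex_unlock(&ctx->lock);
	pthread_cond_broadcast(&ctx->stop_wq);	/* wake_up_all() analogue */
}

/* Deferred handler: runs in "process context" (the run loop), where
 * sleeping and signalling the task are allowed. Mirrors the shape of
 * spufs_handle_class0() in the diff below. */
static int handle_class0(struct ctx_sketch *ctx)
{
	unsigned long stat;

	pthread_mutex_lock(&ctx->lock);
	stat = ctx->class_0_pending;
	ctx->class_0_pending = 0;
	pthread_mutex_unlock(&ctx->lock);

	if (!stat)
		return 0;
	printf("handling class 0 status 0x%lx in process context\n", stat);
	return -1;	/* the kernel code returns -EIO here */
}

The payoff is that a context no longer has to stay loaded on an SPU just to
field these exceptions: the scheduler may switch it out, and the saved state
is handled whenever the controlling thread next runs.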
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c		 57
-rw-r--r--	arch/powerpc/platforms/cell/spufs/fault.c	107
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c		 50
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	  2
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	  1
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	  7
6 files changed, 107 insertions(+), 117 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a560277b3ad7..fb266ae32095 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -132,27 +132,6 @@ int spu_64k_pages_available(void)
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);
-static int __spu_trap_invalid_dma(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
- return 0;
-}
-
-static int __spu_trap_dma_align(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
- return 0;
-}
-
-static int __spu_trap_error(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
- return 0;
-}
-
static void spu_restart_dma(struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -252,10 +231,12 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
return 1;
}
+ spu->class_0_pending = 0;
spu->dar = ea;
spu->dsisr = dsisr;
- mb();
+
spu->stop_callback(spu);
+
return 0;
}
@@ -335,12 +316,13 @@ spu_irq_class_0(int irq, void *data)
spu = data;
+ spin_lock(&spu->register_lock);
mask = spu_int_mask_get(spu, 0);
- stat = spu_int_stat_get(spu, 0);
- stat &= mask;
+ stat = spu_int_stat_get(spu, 0) & mask;
- spin_lock(&spu->register_lock);
spu->class_0_pending |= stat;
+ spu->dsisr = spu_mfc_dsisr_get(spu);
+ spu->dar = spu_mfc_dar_get(spu);
spin_unlock(&spu->register_lock);
spu->stop_callback(spu);
@@ -350,31 +332,6 @@ spu_irq_class_0(int irq, void *data)
return IRQ_HANDLED;
}
-int
-spu_irq_class_0_bottom(struct spu *spu)
-{
- unsigned long flags;
- unsigned long stat;
-
- spin_lock_irqsave(&spu->register_lock, flags);
- stat = spu->class_0_pending;
- spu->class_0_pending = 0;
-
- if (stat & CLASS0_DMA_ALIGNMENT_INTR)
- __spu_trap_dma_align(spu);
-
- if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
- __spu_trap_invalid_dma(spu);
-
- if (stat & CLASS0_SPU_ERROR_INTR)
- __spu_trap_error(spu);
-
- spin_unlock_irqrestore(&spu->register_lock, flags);
-
- return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
-
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 0635f292ae19..720e111f1f6a 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -28,46 +28,69 @@
#include "spufs.h"
-static void spufs_handle_dma_error(struct spu_context *ctx,
+/**
+ * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED flag.
+ *
+ * If the context was created with events, we just set the return event.
+ * Otherwise, send an appropriate signal to the process.
+ */
+static void spufs_handle_event(struct spu_context *ctx,
unsigned long ea, int type)
{
+ siginfo_t info;
+
if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
ctx->event_return |= type;
wake_up_all(&ctx->stop_wq);
- } else {
- siginfo_t info;
- memset(&info, 0, sizeof(info));
-
- switch (type) {
- case SPE_EVENT_INVALID_DMA:
- info.si_signo = SIGBUS;
- info.si_code = BUS_OBJERR;
- break;
- case SPE_EVENT_SPE_DATA_STORAGE:
- info.si_signo = SIGBUS;
- info.si_addr = (void __user *)ea;
- info.si_code = BUS_ADRERR;
- break;
- case SPE_EVENT_DMA_ALIGNMENT:
- info.si_signo = SIGBUS;
- /* DAR isn't set for an alignment fault :( */
- info.si_code = BUS_ADRALN;
- break;
- case SPE_EVENT_SPE_ERROR:
- info.si_signo = SIGILL;
- info.si_addr = (void __user *)(unsigned long)
- ctx->ops->npc_read(ctx) - 4;
- info.si_code = ILL_ILLOPC;
- break;
- }
- if (info.si_signo)
- force_sig_info(info.si_signo, &info, current);
+ return;
}
+
+ memset(&info, 0, sizeof(info));
+
+ switch (type) {
+ case SPE_EVENT_INVALID_DMA:
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_OBJERR;
+ break;
+ case SPE_EVENT_SPE_DATA_STORAGE:
+ info.si_signo = SIGBUS;
+ info.si_addr = (void __user *)ea;
+ info.si_code = BUS_ADRERR;
+ break;
+ case SPE_EVENT_DMA_ALIGNMENT:
+ info.si_signo = SIGBUS;
+ /* DAR isn't set for an alignment fault :( */
+ info.si_code = BUS_ADRALN;
+ break;
+ case SPE_EVENT_SPE_ERROR:
+ info.si_signo = SIGILL;
+ info.si_addr = (void __user *)(unsigned long)
+ ctx->ops->npc_read(ctx) - 4;
+ info.si_code = ILL_ILLOPC;
+ break;
+ }
+
+ if (info.si_signo)
+ force_sig_info(info.si_signo, &info, current);
}
-void spufs_dma_callback(struct spu *spu, int type)
+int spufs_handle_class0(struct spu_context *ctx)
{
- spufs_handle_dma_error(spu->ctx, spu->dar, type);
+ unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
+
+ if (likely(!stat))
+ return 0;
+
+ if (stat & CLASS0_DMA_ALIGNMENT_INTR)
+ spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+
+ if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
+ spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+
+ if (stat & CLASS0_SPU_ERROR_INTR)
+ spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+
+ return -EIO;
}
/*
@@ -95,16 +118,8 @@ int spufs_handle_class1(struct spu_context *ctx)
* in time, we can still expect to get the same fault
* immediately after the context restore.
*/
- if (ctx->state == SPU_STATE_RUNNABLE) {
- ea = ctx->spu->dar;
- dsisr = ctx->spu->dsisr;
- ctx->spu->dar= ctx->spu->dsisr = 0;
- } else {
- ea = ctx->csa.priv1.mfc_dar_RW;
- dsisr = ctx->csa.priv1.mfc_dsisr_RW;
- ctx->csa.priv1.mfc_dar_RW = 0;
- ctx->csa.priv1.mfc_dsisr_RW = 0;
- }
+ ea = ctx->csa.dar;
+ dsisr = ctx->csa.dsisr;
if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
return 0;
@@ -132,6 +147,14 @@ int spufs_handle_class1(struct spu_context *ctx)
ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
spu_acquire(ctx);
+
+ /*
+ * Clear dsisr under ctxt lock after handling the fault, so that
+ * time slicing will not preempt the context while the page fault
+ * handler is running. Context switch code removes mappings.
+ */
+ ctx->csa.dar = ctx->csa.dsisr = 0;
+
/*
* If we handled the fault successfully and are in runnable
* state, restart the DMA.
@@ -152,7 +175,7 @@ int spufs_handle_class1(struct spu_context *ctx)
if (ctx->spu)
ctx->ops->restart_dma(ctx);
} else
- spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+ spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
return ret;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b3cc1dd72185..3b3de6c7ee5b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -15,7 +15,30 @@ void spufs_stop_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;
- wake_up_all(&ctx->stop_wq);
+ /*
+ * It should be impossible to preempt a context while an exception
+ * is being processed, since the context switch code is specially
+ * coded to deal with interrupts ... But, just in case, sanity check
+ * the context pointer. It is OK to return doing nothing since
+ * the exception will be regenerated when the context is resumed.
+ */
+ if (ctx) {
+ /* Copy exception arguments into module specific structure */
+ ctx->csa.class_0_pending = spu->class_0_pending;
+ ctx->csa.dsisr = spu->dsisr;
+ ctx->csa.dar = spu->dar;
+
+ /* ensure that the exception status has hit memory before a
+ * thread waiting on the context's stop queue is woken */
+ smp_wmb();
+
+ wake_up_all(&ctx->stop_wq);
+ }
+
+ /* Clear callback arguments from spu structure */
+ spu->class_0_pending = 0;
+ spu->dsisr = 0;
+ spu->dar = 0;
}
static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -29,9 +52,9 @@ static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
if (ctx->state != SPU_STATE_RUNNABLE ||
test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
return 1;
- pte_fault = spu->dsisr &
+ pte_fault = ctx->csa.dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
- return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+ return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
1 : 0;
}
@@ -287,18 +310,6 @@ static int spu_process_callback(struct spu_context *ctx)
return ret;
}
-static inline int spu_process_events(struct spu_context *ctx)
-{
- struct spu *spu = ctx->spu;
- int ret = 0;
-
- if (spu->class_0_pending)
- ret = spu_irq_class_0_bottom(spu);
- if (!ret && signal_pending(current))
- ret = -ERESTARTSYS;
- return ret;
-}
-
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
@@ -364,13 +375,20 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
if (ret)
break;
+ ret = spufs_handle_class0(ctx);
+ if (ret)
+ break;
+
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
if (ret)
goto out2;
continue;
}
- ret = spu_process_events(ctx);
+
+ if (signal_pending(current))
+ ret = -ERESTARTSYS;
+
} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 52215aa2f3c6..82ea576c53a3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -245,7 +245,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu->wbox_callback = spufs_wbox_callback;
spu->stop_callback = spufs_stop_callback;
spu->mfc_callback = spufs_mfc_callback;
- spu->dma_callback = spufs_dma_callback;
mb();
spu_unmap_mappings(ctx);
spu_restore(&ctx->csa, spu);
@@ -433,7 +432,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
spu->wbox_callback = NULL;
spu->stop_callback = NULL;
spu->mfc_callback = NULL;
- spu->dma_callback = NULL;
spu_associate_mm(spu, NULL);
spu->pid = 0;
spu->tgid = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index afdddbc8cf68..eaab1b239d02 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -222,6 +222,7 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
/* fault handling */
int spufs_handle_class1(struct spu_context *ctx);
+int spufs_handle_class0(struct spu_context *ctx);
/* affinity */
struct spu *affinity_check(struct spu_context *ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index c9600e8e0e16..1a7d7a0f66fc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2077,10 +2077,6 @@ int spu_save(struct spu_state *prev, struct spu *spu)
int rc;
acquire_spu_lock(spu); /* Step 1. */
- prev->dar = spu->dar;
- prev->dsisr = spu->dsisr;
- spu->dar = 0;
- spu->dsisr = 0;
rc = __do_spu_save(prev, spu); /* Steps 2-53. */
release_spu_lock(spu);
if (rc != 0 && rc != 2 && rc != 6) {
@@ -2107,9 +2103,6 @@ int spu_restore(struct spu_state *new, struct spu *spu)
acquire_spu_lock(spu);
harvest(NULL, spu);
spu->slb_replace = 0;
- new->dar = 0;
- new->dsisr = 0;
- spu->class_0_pending = 0;
rc = __do_spu_restore(new, spu);
release_spu_lock(spu);
if (rc) {