author		Luke Browning <lukebr@linux.vnet.ibm.com>	2007-12-20 10:39:59 +0300
committer	Paul Mackerras <paulus@samba.org>	2007-12-21 11:46:20 +0300
commit		91569531d1297db42d68136ac0c85cd85223d0b9 (patch)
tree		5e03e7782bd21a3557678c19930ed52e0cae3b9c /arch/powerpc/platforms/cell/spufs/sched.c
parent		d6ad39bc53521275d14fde86bfb94d9b2ddb7a08 (diff)
[POWERPC] spufs: reorganize spu_run_init
This cleans up spu_run_init so that it does all of the spu initialization for spufs_run_spu. It initializes the spu context as much as possible before it activates the spu and writes the runcntl register.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
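The ordering the message describes is easiest to see in outline. The sketch below is illustrative only, not the patch body (spu_run_init itself lives in run.c, which this view does not include); the helpers follow spufs conventions, but the exact sequence shown here is an assumption:

/* Illustrative outline only (assumed sequence, not the patch body):
 * do all context setup first, make writing runcntl the last step. */
static int spu_run_init_sketch(struct spu_context *ctx, u32 *npc)
{
	/* 1. per-context setup that needs no running SPU */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		/* isolated contexts boot through the loader kernel */
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			int ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}
	} else {
		/* 2. program the entry point while the SPU is stopped */
		ctx->ops->npc_write(ctx, *npc);
	}

	/* 3. activation last: writing runcntl starts execution */
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return 0;
}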
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c | 35 +++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 82ea576c53a3..ef0e5e230fbb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,6 +105,12 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
+	 * assert that the context is not on the runqueue, so it is safe
+	 * to change its scheduling parameters.
+	 */
+	BUG_ON(!list_empty(&ctx->rq));
+
+	/*
 	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 	 * memory ordering here because retrieving the controlling thread is
 	 * per definition racy.
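The pre-existing comment above carries the locking argument for these stores. A portable, stand-alone analog of that reasoning (invented names, not spufs code): a naturally aligned 32-bit store is atomic on powerpc, and in C11 the same intent can be spelled as a relaxed atomic store.

#include <stdatomic.h>

/* Analog only: readers of another thread's scheduling parameters are
 * racy by definition, so atomicity is needed but ordering is not. */
struct sched_info {
	_Atomic int prio;
	_Atomic int policy;
};

void set_prio(struct sched_info *si, int prio)
{
	/* atomic but unordered: concurrent readers may observe either
	 * the old or the new value, and both are acceptable here */
	atomic_store_explicit(&si->prio, prio, memory_order_relaxed);
}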
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	ctx->policy = current->policy;
 
 	/*
-	 * A lot of places that don't hold list_mutex poke into
-	 * cpus_allowed, including grab_runnable_context which
-	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field as well.
+	 * TO DO: the context may be loaded, so we may need to activate
+	 * it again on a different node. But it shouldn't hurt anything
+	 * to update its parameters, because we know that the scheduler
+	 * is not actively looking at this field, since it is not on the
+	 * runqueue. The context will be rescheduled on the proper node
+	 * if it is timesliced or preempted.
 	 */
-	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
-	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-	int node = ctx->spu->node;
+	int node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	__spu_update_sched_info(ctx);
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		node = ctx->spu->node;
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		__spu_update_sched_info(ctx);
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	} else {
+		__spu_update_sched_info(ctx);
+	}
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
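The rewritten spu_update_sched_info() above takes the per-node list_mutex only while the context is bound to a physical SPU; a saved context sits on no list the scheduler walks, so a plain update suffices. A stand-alone userspace analog of that pattern (invented names, pthreads in place of kernel mutexes, not spufs code):

#include <pthread.h>

enum ctx_state { CTX_SAVED, CTX_RUNNABLE };

struct node_info {
	pthread_mutex_t list_mutex;	/* guards the node's context list */
};

struct context {
	enum ctx_state state;
	int node;	/* valid only while CTX_RUNNABLE */
	int prio;	/* the field being updated */
};

static struct node_info nodes[2] = {
	{ PTHREAD_MUTEX_INITIALIZER },
	{ PTHREAD_MUTEX_INITIALIZER },
};

static void __update_sched_info(struct context *ctx, int prio)
{
	ctx->prio = prio;	/* plain 32-bit store; see the comment in hunk one */
}

void update_sched_info(struct context *ctx, int prio)
{
	if (ctx->state == CTX_RUNNABLE) {
		/* published on a node list: exclude list walkers */
		pthread_mutex_lock(&nodes[ctx->node].list_mutex);
		__update_sched_info(ctx, prio);
		pthread_mutex_unlock(&nodes[ctx->node].list_mutex);
	} else {
		/* saved context: nothing can be iterating over it */
		__update_sched_info(ctx, prio);
	}
}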
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			 * higher priority contexts before lower priority
 			 * ones, so this is safe until we introduce
 			 * priority inheritance schemes.
+			 *
+			 * XXX if the highest priority context is locked,
+			 * this can loop a long time. Might be better to
+			 * look at another context or give up after X retries.
 			 */
 			if (!mutex_trylock(&victim->state_mutex)) {
 				victim = NULL;
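The new XXX comment suggests bounding the retry loop. A hypothetical, stand-alone sketch of that idea (invented identifiers; nothing below is in the patch):

#include <pthread.h>
#include <stddef.h>

#define MAX_VICTIM_RETRIES 5

struct victim {
	pthread_mutex_t state_mutex;
	int prio;
};

/* Stand-in for the lowest-priority scan that find_victim() performs. */
static struct victim *pick_lowest_prio(struct victim *v, size_t n)
{
	struct victim *best = NULL;
	for (size_t i = 0; i < n; i++)
		if (!best || v[i].prio < best->prio)
			best = &v[i];
	return best;
}

/* Returns a locked victim, or NULL after a bounded number of attempts
 * instead of looping indefinitely on a contended context. */
struct victim *find_victim_bounded(struct victim *v, size_t n)
{
	for (int retries = MAX_VICTIM_RETRIES; retries > 0; retries--) {
		struct victim *cand = pick_lowest_prio(v, n);
		if (!cand)
			return NULL;
		/* trylock, as in the hunk above: never sleep waiting
		 * for a victim that is already being operated on */
		if (pthread_mutex_trylock(&cand->state_mutex) == 0)
			return cand;	/* caller unlocks state_mutex */
	}
	return NULL;	/* give up after X retries */
}

A fixed retry budget trades a possible missed preemption for a guaranteed bound on the time spent spinning over locked victims.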