author     Aegis Lin <aegislin@gmail.com>            2007-12-20 10:39:59 +0300
committer  Paul Mackerras <paulus@samba.org>         2007-12-21 11:46:21 +0300
commit     90608a2928a86b9464a8698275a1138e82e3a010 (patch)
tree       69d1aa5589e2f3caac679ab6efd292335f0fcd8d /arch/powerpc/platforms/cell/spufs/sched.c
parent     c9101bdb1b0c56a75a4618542d368fe5013946b9 (diff)
download   linux-90608a2928a86b9464a8698275a1138e82e3a010.tar.xz
[POWERPC] spufs: Use separate timer for /proc/spu_loadavg calculation
The original spusched_timer was designed to take effect only when
a context is waiting in the runqueue.
This change adds a second, lower-frequency timer that purely handles
the spu_load updates. The new timer is triggered every LOAD_FREQ
ticks.
Signed-off-by: Aegis Lin <aegislin@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
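
The scheme described in the commit message reduces to two independent, self re-arming kernel timers: the existing spusched_timer keeps ticking at SPUSCHED_TICK while a context waits in the runqueue, and the new spuloadavg_timer fires once per LOAD_FREQ purely to sample the load. The standalone module below is only a minimal sketch of that pattern, not the spufs code itself; it assumes the timer API in use at the time of this commit (setup_timer() with an unsigned long callback argument, removed in later kernels), and the names fast_tick/slow_tick and the 10 ms / 5 s periods are made up for illustration.

/*
 * Minimal sketch (hypothetical module, not part of the patch) of the
 * two-timer pattern: a fast, frequently re-armed tick plus a slow,
 * periodic sampling tick.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list fast_timer;	/* stands in for spusched_timer */
static struct timer_list slow_timer;	/* stands in for spuloadavg_timer */

static void fast_tick(unsigned long data)
{
	/* do the frequent work, then re-arm at the short period */
	mod_timer(&fast_timer, jiffies + msecs_to_jiffies(10));
}

static void slow_tick(unsigned long data)
{
	/* sample/update statistics, then re-arm at the long period */
	mod_timer(&slow_timer, jiffies + 5 * HZ);
}

static int __init two_timer_init(void)
{
	setup_timer(&fast_timer, fast_tick, 0);
	setup_timer(&slow_timer, slow_tick, 0);
	mod_timer(&fast_timer, jiffies + msecs_to_jiffies(10));
	/* an already-expired value makes the first slow tick run right away */
	mod_timer(&slow_timer, jiffies);
	return 0;
}

static void __exit two_timer_exit(void)
{
	del_timer_sync(&fast_timer);
	del_timer_sync(&slow_timer);
}

module_init(two_timer_init);
module_exit(two_timer_exit);
MODULE_LICENSE("GPL");

The patch itself arms only spuloadavg_timer unconditionally (mod_timer(&spuloadavg_timer, 0), an expires value in the past, so the first load sample happens almost immediately); per the commit message, spusched_timer is still armed only while a context is waiting in the runqueue.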
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index eee7cef28f1a..8c8af11b35b4 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
+static struct timer_list spuloadavg_timer;
 
 /*
  * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
@@ -922,35 +923,31 @@ static unsigned long count_active_contexts(void)
 }
 
 /**
- * spu_calc_load - given tick count, update the avenrun load estimates.
- * @tick: tick count
+ * spu_calc_load - update the avenrun load estimates.
  *
  * No locking against reading these values from userspace, as for
  * the CPU loadavg code.
  */
-static void spu_calc_load(unsigned long ticks)
+static void spu_calc_load(void)
 {
 	unsigned long active_tasks; /* fixed-point */
-	static int count = LOAD_FREQ;
-
-	count -= ticks;
-
-	if (unlikely(count < 0)) {
-		active_tasks = count_active_contexts() * FIXED_1;
-		do {
-			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
-			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
-			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
-			count += LOAD_FREQ;
-		} while (count < 0);
-	}
+
+	active_tasks = count_active_contexts() * FIXED_1;
+	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
 
 static void spusched_wake(unsigned long data)
 {
 	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	wake_up_process(spusched_task);
-	spu_calc_load(SPUSCHED_TICK);
+}
+
+static void spuloadavg_wake(unsigned long data)
+{
+	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
+	spu_calc_load();
 }
 
 static int spusched_thread(void *unused)
@@ -1068,6 +1065,7 @@ int __init spu_sched_init(void)
 	spin_lock_init(&spu_prio->runq_lock);
 
 	setup_timer(&spusched_timer, spusched_wake, 0);
+	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
 
 	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
 	if (IS_ERR(spusched_task)) {
@@ -1075,6 +1073,8 @@ int __init spu_sched_init(void)
 		goto out_free_spu_prio;
 	}
 
+	mod_timer(&spuloadavg_timer, 0);
+
 	entry = create_proc_entry("spu_loadavg", 0, NULL);
 	if (!entry)
 		goto out_stop_kthread;
@@ -1100,6 +1100,7 @@ void spu_sched_exit(void)
 	remove_proc_entry("spu_loadavg", NULL);
 
 	del_timer_sync(&spusched_timer);
+	del_timer_sync(&spuloadavg_timer);
 	kthread_stop(spusched_task);
 
 	for (node = 0; node < MAX_NUMNODES; node++) {
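
For reference, what spu_calc_load() now does once per LOAD_FREQ interval is the same fixed-point exponential averaging used by the CPU loadavg code. The following user-space program is a sketch of that arithmetic only: the FSHIFT/FIXED_1/EXP_* values and the CALC_LOAD macro mirror the kernel's definitions of that era, while the sample active-context counts are invented for illustration.

#include <stdio.h>

#define FSHIFT   11                     /* bits of fixed-point precision */
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1    1884                   /* 1/exp(5sec/1min) in fixed point */
#define EXP_5    2014                   /* 1/exp(5sec/5min) */
#define EXP_15   2037                   /* 1/exp(5sec/15min) */

#define CALC_LOAD(load, exp, n)            \
	load *= exp;                       \
	load += (n) * (FIXED_1 - (exp));   \
	load >>= FSHIFT;

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	/* pretend count_active_contexts() returned these, one per 5s tick */
	unsigned long samples[] = { 4, 4, 2, 0, 0, 1 };
	unsigned long active;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		active = samples[i] * FIXED_1;	/* scale to fixed point */
		CALC_LOAD(avenrun[0], EXP_1, active);
		CALC_LOAD(avenrun[1], EXP_5, active);
		CALC_LOAD(avenrun[2], EXP_15, active);
	}

	/* integer/fraction split in the style of the loadavg proc files */
	printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
	       avenrun[0] >> FSHIFT, ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       avenrun[1] >> FSHIFT, ((avenrun[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       avenrun[2] >> FSHIFT, ((avenrun[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}

The point of the patch is when this recurrence runs, not what it computes: previously spu_calc_load() was called from spusched_wake() on every SPUSCHED_TICK and kept its own countdown, so the averages were updated only while the scheduler timer was active; with a dedicated timer the sampling happens every LOAD_FREQ regardless of runqueue activity.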