author		Frederic Weisbecker <fweisbec@gmail.com>	2010-01-17 14:15:31 +0300
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-17 15:09:51 +0300
commit		7defb0f879bbcfe29e3c6f29d685d4f29b7a0700 (patch)
tree		4fa8b96fb0e52cebf90f26911929a2c3e54bde49 /kernel
parent		5b0311e1f2464547fc6f17a82d7ea2538c8c7a70 (diff)
download	linux-7defb0f879bbcfe29e3c6f29d685d4f29b7a0700.tar.xz
perf: Don't schedule out/in pinned events on task tick
We don't need to schedule in/out pinned events on task tick, now that
pinned and flexible groups can be scheduled separately.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_event.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index bfc4ee015c87..a90ae694cbc1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1525,17 +1525,17 @@ void perf_event_task_tick(struct task_struct *curr)
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
-	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_ALL);
+		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
 		rotate_ctx(ctx);
 
-	cpu_ctx_sched_in(cpuctx, EVENT_ALL);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_in(curr, EVENT_ALL);
+		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
 }
 
 static int event_enable_on_exec(struct perf_event *event,
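
For context, below is a sketch of how the tick rotation path reads with this
patch applied. It is reconstructed from the hunk above; the local declarations
and the way cpuctx/ctx are obtained are assumptions about the surrounding
code that era (they are not part of this diff), and the comments are purely
illustrative, not upstream text.

void perf_event_task_tick(struct task_struct *curr)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	/* Assumption: how the contexts are looked up is outside this hunk. */
	cpuctx = &__get_cpu_var(perf_cpu_context);
	ctx = curr->perf_event_ctxp;

	if (ctx)
		perf_ctx_adjust_freq(ctx);

	/* Schedule out only the flexible groups; pinned events keep counting. */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	/* Rotate so flexible groups that could not fit on the PMU get a turn. */
	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	/* Schedule back in only the flexible groups; pinned ones never left. */
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
}

The point of the change follows from the commit message: pinned events are
meant to stay on the PMU for as long as their context is active, so cycling
them out and back in on every tick was pure overhead; only the flexible
groups need to rotate, and EVENT_FLEXIBLE restricts the tick path to them.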