author    | Shang XiaoJing <shangxiaojing@huawei.com> | 2022-08-24 11:28:56 +0300
committer | Peter Zijlstra <peterz@infradead.org>     | 2022-08-27 01:05:35 +0300
commit    | 5531ecffa4b923bc7739e9ea73c552d80af602dc
tree      | 5fa795ee0847e07754e96043ee214525dc7c551c /kernel/sched
parent    | e4fe074d6c359c19b74564fa1364fe48343cfa5d
download  | linux-5531ecffa4b923bc7739e9ea73c552d80af602dc.tar.xz
sched: Add update_current_exec_runtime helper
Wrap the repeated code in a helper function, update_current_exec_runtime(),
which updates the exec time of the current task.
Signed-off-by: Shang XiaoJing <shangxiaojing@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220824082856.15674-1-shangxiaojing@huawei.com
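For illustration only, here is a minimal userspace C sketch of the consolidation pattern this commit applies: the four accounting statements duplicated in update_curr_dl(), update_curr_rt() and put_prev_task_stop() move into a single inline helper that charges delta_exec and restarts the exec window. The struct layouts and the `*_stub` names below are simplified stand-ins invented for this sketch, not the kernel's real definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's scheduling structures. */
struct sched_entity_stub {
	uint64_t sum_exec_runtime;
	uint64_t exec_start;
};

struct task_stub {
	struct sched_entity_stub se;
	uint64_t group_runtime;  /* stands in for account_group_exec_runtime() state */
	uint64_t cgroup_cputime; /* stands in for cgroup_account_cputime() state */
};

/*
 * Consolidated helper, mirroring update_current_exec_runtime() in the patch:
 * charge delta_exec to the task, its thread group and its cgroup, then reset
 * the exec window to 'now'.
 */
static inline void update_current_exec_runtime_stub(struct task_stub *curr,
						    uint64_t now,
						    uint64_t delta_exec)
{
	curr->se.sum_exec_runtime += delta_exec;
	curr->group_runtime += delta_exec;

	curr->se.exec_start = now;
	curr->cgroup_cputime += delta_exec;
}

/*
 * Caller shape shared by the dl/rt/stop paths after the change: compute the
 * delta since exec_start, clamp a negative delta, make one helper call.
 */
static void update_curr_stub(struct task_stub *curr, uint64_t now)
{
	uint64_t delta_exec = now - curr->se.exec_start;

	if ((int64_t)delta_exec < 0)
		delta_exec = 0;

	update_current_exec_runtime_stub(curr, now, delta_exec);
}

int main(void)
{
	struct task_stub t = { .se = { .exec_start = 100 } };

	update_curr_stub(&t, 250);
	printf("runtime=%llu next exec_start=%llu\n",
	       (unsigned long long)t.se.sum_exec_runtime,
	       (unsigned long long)t.se.exec_start);
	return 0;
}
```

As the diffstat below shows, only the deadline, rt and stop scheduling classes are converted to the helper; it is defined static inline in kernel/sched/sched.h, so the consolidation adds no function-call overhead.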
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/deadline.c  |  6
-rw-r--r--  kernel/sched/rt.c        |  6
-rw-r--r--  kernel/sched/sched.h     | 10
-rw-r--r--  kernel/sched/stop_task.c | 11
4 files changed, 16 insertions, 17 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1d9c90958baa..4cbdc0c560da 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1333,11 +1333,7 @@ static void update_curr_dl(struct rq *rq)
 
 	trace_sched_stat_runtime(curr, delta_exec, 0);
 
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
+	update_current_exec_runtime(curr, now, delta_exec);
 
 	if (dl_entity_is_special(dl_se))
 		return;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 054b6711e961..4bc84a1135db 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1062,11 +1062,7 @@ static void update_curr_rt(struct rq *rq)
 
 	trace_sched_stat_runtime(curr, delta_exec, 0);
 
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
+	update_current_exec_runtime(curr, now, delta_exec);
 
 	if (!rt_bandwidth_enabled())
 		return;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 74130a69d365..da17be6f27fd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3158,4 +3158,14 @@ extern int sched_dynamic_mode(const char *str);
 extern void sched_dynamic_update(int mode);
 #endif
 
+static inline void update_current_exec_runtime(struct task_struct *curr,
+						u64 now, u64 delta_exec)
+{
+	curr->se.sum_exec_runtime += delta_exec;
+	account_group_exec_runtime(curr, delta_exec);
+
+	curr->se.exec_start = now;
+	cgroup_account_cputime(curr, delta_exec);
+}
+
 #endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index d04073a93eb4..85590599b4d6 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -71,20 +71,17 @@ static void yield_task_stop(struct rq *rq)
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
 	struct task_struct *curr = rq->curr;
-	u64 delta_exec;
+	u64 now, delta_exec;
 
-	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+	now = rq_clock_task(rq);
+	delta_exec = now - curr->se.exec_start;
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
 	schedstat_set(curr->stats.exec_max,
 		      max(curr->stats.exec_max, delta_exec));
 
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = rq_clock_task(rq);
-	cgroup_account_cputime(curr, delta_exec);
+	update_current_exec_runtime(curr, now, delta_exec);
 }
 
 /*