| | | |
|---|---|---|
| author | Frederic Weisbecker <frederic@kernel.org> | 2019-10-16 05:56:54 +0300 |
| committer | Ingo Molnar <mingo@kernel.org> | 2019-10-29 12:01:14 +0300 |
| commit | e44fcb4b7a299602fb300b82a546c0b8a50d9d90 (patch) | |
| tree | 5c3387074ff92a20f94e34a76694d436d0870ffd | |
| parent | 097f2541c6e51e0c1cdb1e6d46ef08a624336518 (diff) | |
| download | linux-e44fcb4b7a299602fb300b82a546c0b8a50d9d90.tar.xz | |
sched/vtime: Rename vtime_accounting_cpu_enabled() to vtime_accounting_enabled_this_cpu()
Standardize the naming on top of the vtime_accounting_enabled_*() base.
Also make it clear that this function checks the vtime state of the
*current* CPU. We will need to add an API to check that state on remote
CPUs as well, so the naming must be disambiguated.
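To illustrate the per-CPU distinction the new name encodes, here is a minimal, self-contained sketch (plain user-space C with stubbed helpers and types, not the kernel implementation) of the caller-side pattern this patch renames in guest_enter_irqoff(): when vtime accounting is active on the current CPU, guest time is accounted through the vtime path; otherwise the task is only flagged as running a vCPU.

```c
/*
 * Minimal user-space mock of the call pattern touched by this patch.
 * All helpers and types below are stand-ins for the real kernel ones;
 * only the shape of the vtime_accounting_enabled_this_cpu() check
 * mirrors the actual guest_enter_irqoff() code in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_VCPU 0x1u  /* illustrative flag bit; the kernel defines the real PF_VCPU */

struct task {
        unsigned int flags;
};

static struct task current_task;  /* stand-in for the kernel's "current" */
static bool vtime_on_this_cpu;    /* stand-in for per-CPU context-tracking state */

/* Stand-in for the renamed helper: true only when vtime accounting is
 * enabled on the *current* CPU (cf. the CONFIG_VIRT_CPU_ACCOUNTING_GEN
 * variant in include/linux/vtime.h). */
static bool vtime_accounting_enabled_this_cpu(void)
{
        return vtime_on_this_cpu;
}

static void vtime_guest_enter(struct task *t)
{
        printf("accounting guest time via vtime for task %p\n", (void *)t);
}

/* Same branch structure as guest_enter_irqoff() in the diff below. */
static void guest_enter_irqoff(void)
{
        if (vtime_accounting_enabled_this_cpu())
                vtime_guest_enter(&current_task);
        else
                current_task.flags |= PF_VCPU;
}

int main(void)
{
        vtime_on_this_cpu = false;
        guest_enter_irqoff();
        printf("PF_VCPU set: %s\n", (current_task.flags & PF_VCPU) ? "yes" : "no");

        vtime_on_this_cpu = true;
        guest_enter_irqoff();
        return 0;
}
```

The renamed helper keeps the implicit this-CPU semantics; a later variant taking an explicit CPU argument, as hinted at above, would let callers query remote CPUs as well.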
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Jacek Anaszewski <jacek.anaszewski@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J . Wysocki <rjw@rjwysocki.net>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Link: https://lkml.kernel.org/r/20191016025700.31277-9-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| | | |
|---|---|---|
| -rw-r--r-- | include/linux/context_tracking.h | 4 |
| -rw-r--r-- | include/linux/vtime.h | 10 |
| -rw-r--r-- | kernel/sched/cputime.c | 2 |
| -rw-r--r-- | kernel/time/tick-sched.c | 2 |

4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index c9065ad518a7..64ec82851aa3 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -103,7 +103,7 @@ static inline void context_tracking_init(void) { }
 /* must be called with irqs disabled */
 static inline void guest_enter_irqoff(void)
 {
-        if (vtime_accounting_cpu_enabled())
+        if (vtime_accounting_enabled_this_cpu())
                 vtime_guest_enter(current);
         else
                 current->flags |= PF_VCPU;
@@ -127,7 +127,7 @@ static inline void guest_exit_irqoff(void)
         if (context_tracking_enabled())
                 __context_tracking_exit(CONTEXT_GUEST);
 
-        if (vtime_accounting_cpu_enabled())
+        if (vtime_accounting_enabled_this_cpu())
                 vtime_guest_exit(current);
         else
                 current->flags &= ~PF_VCPU;
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 54e91511250b..eb2e7a19054b 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -11,11 +11,11 @@ struct task_struct;
 
 
 /*
- * vtime_accounting_cpu_enabled() definitions/declarations
+ * vtime_accounting_enabled_this_cpu() definitions/declarations
  */
 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
 
-static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
 extern void vtime_task_switch(struct task_struct *prev);
 
 #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
@@ -31,7 +31,7 @@ static inline bool vtime_accounting_enabled(void)
         return context_tracking_enabled();
 }
 
-static inline bool vtime_accounting_cpu_enabled(void)
+static inline bool vtime_accounting_enabled_this_cpu(void)
 {
         if (vtime_accounting_enabled()) {
                 if (context_tracking_enabled_this_cpu())
@@ -45,13 +45,13 @@ extern void vtime_task_switch_generic(struct task_struct *prev);
 
 static inline void vtime_task_switch(struct task_struct *prev)
 {
-        if (vtime_accounting_cpu_enabled())
+        if (vtime_accounting_enabled_this_cpu())
                 vtime_task_switch_generic(prev);
 }
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
-static inline bool vtime_accounting_cpu_enabled(void) { return false; }
+static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
 static inline void vtime_task_switch(struct task_struct *prev) { }
 
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 34086afc3518..b931a19df093 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -475,7 +475,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
         u64 cputime, steal;
         struct rq *rq = this_rq();
 
-        if (vtime_accounting_cpu_enabled())
+        if (vtime_accounting_enabled_this_cpu())
                 return;
 
         if (sched_clock_irqtime) {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 955851748dc3..c2748232f607 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1119,7 +1119,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
         unsigned long ticks;
 
-        if (vtime_accounting_cpu_enabled())
+        if (vtime_accounting_enabled_this_cpu())
                 return;
         /*
          * We stopped the tick in idle. Update process times would miss the