commit    dbb381b619aa5242c9cb1a8fd54d71c4d79c91eb
tree      e2bcd337c4eb7f4f3a44e0f0f05a19e775f18f90
parent    336622e9fce7a0b8243e1cd9f6e1c24d96f13cd8
parent    4479730e9263befbb9ce9563a09563db2acb8f7c
author    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 04:51:47 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 04:51:47 +0300
Merge tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timekeeping and timer updates from Thomas Gleixner:

 "Core:

   - Consolidation of the vDSO build infrastructure to address the
     difficulties of cross-builds for ARM64 compat vDSO libraries by
     restricting the exposure of header content to the vDSO build.

     This is achieved by splitting out header content into separate
     headers, which contain only the minimally required information
     necessary to build the vDSO. These new headers are included from
     the kernel headers and the vDSO-specific files.

   - Enhancements to the generic vDSO library allowing more fine-grained
     control over the compiled-in code, further reducing architecture-
     specific storage and preparing for adoption of the generic library
     by PPC.

   - Cleanup and consolidation of the exit-related code in posix CPU
     timers.

   - Small cleanups and enhancements here and there

  Drivers:

   - The obligatory new drivers: Ingenic JZ47xx and X1000 TCU support

   - Correct the clock rate of the PIT64b global clock

   - setup_irq() cleanup

   - Preparation for PWM and suspend support for the TI DM timer

   - Expand the fttmr010 driver to support ast2600 systems

   - The usual small fixes, enhancements and cleanups all over the
     place"

* tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits)
  Revert "clocksource/drivers/timer-probe: Avoid creating dead devices"
  vdso: Fix clocksource.h macro detection
  um: Fix header inclusion
  arm64: vdso32: Enable Clang Compilation
  lib/vdso: Enable common headers
  arm: vdso: Enable arm to use common headers
  x86/vdso: Enable x86 to use common headers
  mips: vdso: Enable mips to use common headers
  arm64: vdso32: Include common headers in the vdso library
  arm64: vdso: Include common headers in the vdso library
  arm64: Introduce asm/vdso/processor.h
  arm64: vdso32: Code clean up
  linux/elfnote.h: Replace elf.h with UAPI equivalent
  scripts: Fix the inclusion order in modpost
  common: Introduce processor.h
  linux/ktime.h: Extract common header for vDSO
  linux/jiffies.h: Extract common header for vDSO
  linux/time64.h: Extract common header for vDSO
  linux/time32.h: Extract common header for vDSO
  linux/time.h: Extract common header for vDSO
  ...
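The header-split pattern mentioned above moves the vDSO-visible subset of a kernel header into an include/vdso/ counterpart, which the original linux/ header then includes. A minimal sketch of the idea, with abridged and assumed contents (not copied from the series):

    /* include/vdso/ktime.h: only what the vDSO build may see (sketch) */
    #ifndef __VDSO_KTIME_H
    #define __VDSO_KTIME_H

    #include <vdso/jiffies.h>
    /* minimal types/constants needed by the vDSO time getters */

    #endif /* __VDSO_KTIME_H */

    /* include/linux/ktime.h: full kernel API, layered on top (sketch) */
    #include <vdso/ktime.h>
    /* ... kernel-only helpers (ktime_get() and friends) stay here ... */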
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clocksource.c         9
-rw-r--r--  kernel/time/hrtimer.c             2
-rw-r--r--  kernel/time/namespace.c           7
-rw-r--r--  kernel/time/posix-cpu-timers.c  148
-rw-r--r--  kernel/time/posix-timers.c        3
-rw-r--r--  kernel/time/sched_clock.c         9
-rw-r--r--  kernel/time/timekeeping.c         3
-rw-r--r--  kernel/time/timer.c              16
-rw-r--r--  kernel/time/vsyscall.c           12
9 files changed, 108 insertions(+), 101 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 428beb69426a..7cb09c4cf21c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -928,6 +928,15 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_arch_init(cs);
+#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE
+ if (cs->vdso_clock_mode < 0 ||
+ cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
+ pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
+ cs->name, cs->vdso_clock_mode);
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ }
+#endif
+
/* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq);
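The new range check runs before the clocksource is published, so an out-of-range mode degrades to VDSO_CLOCKMODE_NONE instead of corrupting the vDSO data page. For context, a hedged sketch of how a driver opts in at registration time; only .vdso_clock_mode and VDSO_CLOCKMODE_NONE appear in this hunk, the remaining values are illustrative:

    static u64 my_read(struct clocksource *cs);

    static struct clocksource my_clksrc = {
            .name            = "my_clksrc",          /* illustrative */
            .rating          = 300,
            .read            = my_read,
            .mask            = CLOCKSOURCE_MASK(56),
            .flags           = CLOCK_SOURCE_IS_CONTINUOUS,
            /* Must be a valid VDSO_CLOCKMODE_* value, or the check
             * above resets it to VDSO_CLOCKMODE_NONE (only meaningful
             * with CONFIG_GENERIC_VDSO_CLOCK_MODE). */
            .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
    };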
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8cce72501aea..d0a5ba37aff4 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -311,7 +311,7 @@ s64 __ktime_divns(const ktime_t kt, s64 div)
div >>= 1;
}
tmp >>= sft;
- do_div(tmp, (unsigned long) div);
+ do_div(tmp, (u32) div);
return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
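do_div() divides a 64-bit dividend in place by a 32-bit divisor; the loop above has already shifted div until it fits in 32 bits, so the (u32) cast states the contract honestly, whereas (unsigned long) is 64 bits wide on 64-bit architectures and only looked like a narrowing. A runnable userspace analog of that contract (GNU C; the macro is a stand-in, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's do_div(): divides the 64-bit
     * dividend in place by a 32-bit divisor, yields the remainder. */
    #define do_div_sketch(n, base) ({                       \
            uint32_t __base = (base);                       \
            uint32_t __rem  = (uint32_t)((n) % __base);     \
            (n) /= __base;                                  \
            __rem;                                          \
    })

    int main(void)
    {
            uint64_t tmp = 1000000007ULL * 3 + 11;
            uint32_t rem = do_div_sketch(tmp, 1000000007U);
            printf("quotient=%llu remainder=%u\n",
                   (unsigned long long)tmp, rem);   /* 3 and 11 */
            return 0;
    }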
diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c
index 12858507d75a..e6ba064ce773 100644
--- a/kernel/time/namespace.c
+++ b/kernel/time/namespace.c
@@ -8,6 +8,7 @@
#include <linux/user_namespace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
+#include <linux/clocksource.h>
#include <linux/seq_file.h>
#include <linux/proc_ns.h>
#include <linux/export.h>
@@ -172,8 +173,8 @@ static struct timens_offset offset_from_ts(struct timespec64 off)
* for vdso_data->clock_mode is a non-issue. The task is spin waiting for the
* update to finish and for 'seq' to become even anyway.
*
- * Timens page has vdso_data->clock_mode set to VCLOCK_TIMENS which enforces
- * the time namespace handling path.
+ * Timens page has vdso_data->clock_mode set to VDSO_CLOCKMODE_TIMENS which
+ * enforces the time namespace handling path.
*/
static void timens_setup_vdso_data(struct vdso_data *vdata,
struct time_namespace *ns)
@@ -183,7 +184,7 @@ static void timens_setup_vdso_data(struct vdso_data *vdata,
struct timens_offset boottime = offset_from_ts(ns->offsets.boottime);
vdata->seq = 1;
- vdata->clock_mode = VCLOCK_TIMENS;
+ vdata->clock_mode = VDSO_CLOCKMODE_TIMENS;
offset[CLOCK_MONOTONIC] = monotonic;
offset[CLOCK_MONOTONIC_RAW] = monotonic;
offset[CLOCK_MONOTONIC_COARSE] = monotonic;
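Because VDSO_CLOCKMODE_TIMENS makes every vDSO fast path bail out, the per-clock offsets installed above get applied on the fallback path. What applying one offset amounts to, as a hedged sketch (helper name invented; timens_offset is assumed to carry sec/nsec fields):

    /* Illustrative only: shift a raw clock reading into the namespace. */
    static void timens_apply_offset_sketch(struct timespec64 *ts,
                                           const struct timens_offset *off)
    {
            ts->tv_sec  += off->sec;
            ts->tv_nsec += off->nsec;
            /* Offsets may be negative; only the positive carry is
             * shown here, full normalization omitted. */
            if (ts->tv_nsec >= NSEC_PER_SEC) {
                    ts->tv_nsec -= NSEC_PER_SEC;
                    ts->tv_sec++;
            }
    }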
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2c48a7233b19..2fd3b3fa68bf 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -118,6 +118,16 @@ static inline int validate_clock_permissions(const clockid_t clock)
return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
}
+static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
+{
+ return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
+}
+
+static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
+{
+ return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
+}
+
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
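With a struct pid stored instead of a task_struct pointer, a timer no longer pins its target after exit; each operation re-resolves the task under RCU and bails out once it has been reaped. The access pattern, condensed from the hunks below:

    rcu_read_lock();
    p = cpu_timer_task_rcu(timer);  /* NULL once the target is reaped */
    if (!p)
            goto out;               /* -ESRCH or silent no-op */
    /* ... sample the clock, take sighand lock to (dis)arm ... */
    out:
    rcu_read_unlock();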
@@ -336,9 +346,7 @@ static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
/*
* Sample a process (thread group) clock for the given task clkid. If the
* group's cputime accounting is already enabled, read the atomic
- * store. Otherwise a full update is required. Task's sighand lock must be
- * held to protect the task traversal on a full update. clkid is already
- * validated.
+ * store. Otherwise a full update is required. clkid is already validated.
*/
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
bool start)
@@ -393,7 +401,12 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
new_timer->kclock = &clock_posix_cpu;
timerqueue_init(&new_timer->it.cpu.node);
- new_timer->it.cpu.task = p;
+ new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
+ /*
+ * get_task_for_clock() took a reference on @p. Drop it as the timer
+ * holds a reference on the pid of @p.
+ */
+ put_task_struct(p);
return 0;
}
@@ -406,13 +419,15 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
static int posix_cpu_timer_del(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
struct sighand_struct *sighand;
+ struct task_struct *p;
unsigned long flags;
int ret = 0;
- if (WARN_ON_ONCE(!p))
- return -EINVAL;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Protect against sighand release/switch in exit/exec and process/
@@ -434,8 +449,10 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
unlock_task_sighand(p, &flags);
}
+out:
+ rcu_read_unlock();
if (!ret)
- put_task_struct(p);
+ put_pid(ctmr->pid);
return ret;
}
@@ -484,12 +501,11 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the sighand lock held.
*/
-static void arm_timer(struct k_itimer *timer)
+static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
int clkidx = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
- struct task_struct *p = ctmr->task;
struct posix_cputimer_base *base;
if (CPUCLOCK_PERTHREAD(timer->it_clock))
@@ -564,13 +580,21 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
u64 old_expires, new_expires, old_incr, val;
struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
struct sighand_struct *sighand;
+ struct task_struct *p;
unsigned long flags;
int ret = 0;
- if (WARN_ON_ONCE(!p))
- return -EINVAL;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p) {
+ /*
+ * If p has just been reaped, we can no
+ * longer get any information about it at all.
+ */
+ rcu_read_unlock();
+ return -ESRCH;
+ }
/*
* Use the to_ktime conversion because that clamps the maximum
@@ -587,8 +611,10 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
* If p has just been reaped, we can no
* longer get any information about it at all.
*/
- if (unlikely(sighand == NULL))
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
return -ESRCH;
+ }
/*
* Disarm any old timer after extracting its expiry time.
@@ -662,7 +688,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
*/
cpu_timer_setexpires(ctmr, new_expires);
if (new_expires != 0 && val < new_expires) {
- arm_timer(timer);
+ arm_timer(timer, p);
}
unlock_task_sighand(p, &flags);
@@ -693,6 +719,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = 0;
out:
+ rcu_read_unlock();
if (old)
old->it_interval = ns_to_timespec64(old_incr);
@@ -704,10 +731,12 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 now, expires = cpu_timer_getexpires(ctmr);
- struct task_struct *p = ctmr->task;
+ struct task_struct *p;
- if (WARN_ON_ONCE(!p))
- return;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Easy part: convert the reload time.
@@ -715,36 +744,15 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
itp->it_interval = ktime_to_timespec64(timer->it_interval);
if (!expires)
- return;
+ goto out;
/*
* Sample the clock to take the difference with the expiry time.
*/
- if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
- } else {
- struct sighand_struct *sighand;
- unsigned long flags;
-
- /*
- * Protect against sighand release/switch in exit/exec and
- * also make timer sampling safe if it ends up calling
- * thread_group_cputime().
- */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL)) {
- /*
- * The process has been reaped.
- * We can't even collect a sample any more.
- * Disarm the timer, nothing else to do.
- */
- cpu_timer_setexpires(ctmr, 0);
- return;
- } else {
- now = cpu_clock_sample_group(clkid, p, false);
- unlock_task_sighand(p, &flags);
- }
- }
+ else
+ now = cpu_clock_sample_group(clkid, p, false);
if (now < expires) {
itp->it_value = ns_to_timespec64(expires - now);
@@ -756,6 +764,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
+out:
+ rcu_read_unlock();
}
#define MAX_COLLECTED 20
@@ -976,56 +986,38 @@ static void check_process_timers(struct task_struct *tsk,
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
+ struct task_struct *p;
struct sighand_struct *sighand;
unsigned long flags;
u64 now;
- if (WARN_ON_ONCE(!p))
- return;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
- if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
- bump_cpu_timer(timer, now);
- if (unlikely(p->exit_state))
- return;
-
- /* Protect timer list r/w in arm_timer() */
- sighand = lock_task_sighand(p, &flags);
- if (!sighand)
- return;
- } else {
- /*
- * Protect arm_timer() and timer sampling in case of call to
- * thread_group_cputime().
- */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL)) {
- /*
- * The process has been reaped.
- * We can't even collect a sample any more.
- */
- cpu_timer_setexpires(ctmr, 0);
- return;
- } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
- /* If the process is dying, no need to rearm */
- goto unlock;
- }
+ else
now = cpu_clock_sample_group(clkid, p, true);
- bump_cpu_timer(timer, now);
- /* Leave the sighand locked for the call below. */
- }
+
+ bump_cpu_timer(timer, now);
+
+ /* Protect timer list r/w in arm_timer() */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL))
+ goto out;
/*
* Now re-arm for the new expiry time.
*/
- arm_timer(timer);
-unlock:
+ arm_timer(timer, p);
unlock_task_sighand(p, &flags);
+out:
+ rcu_read_unlock();
}
/**
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index ff0eb30de346..07709ac30439 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -121,7 +121,8 @@ static struct k_itimer *__posix_timers_find(struct hlist_head *head,
{
struct k_itimer *timer;
- hlist_for_each_entry_rcu(timer, head, t_hash) {
+ hlist_for_each_entry_rcu(timer, head, t_hash,
+ lockdep_is_held(&hash_lock)) {
if ((timer->it_signal == sig) && (timer->it_id == id))
return timer;
}
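The fourth argument is the optional lockdep expression the RCU list iterators grew: traversal is safe either under rcu_read_lock() or while the update-side lock is held, and the expression lets CONFIG_PROVE_RCU_LIST verify the second case instead of flagging it. The general shape:

    /* Reader: RCU protects the walk. */
    rcu_read_lock();
    hlist_for_each_entry_rcu(timer, head, t_hash,
                             lockdep_is_held(&hash_lock)) {
            /* ... */
    }
    rcu_read_unlock();

    /* Updater: holding hash_lock makes the same walk legal,
     * and the condition documents exactly that. */
    spin_lock(&hash_lock);
    hlist_for_each_entry_rcu(timer, head, t_hash,
                             lockdep_is_held(&hash_lock)) {
            /* ... */
    }
    spin_unlock(&hash_lock);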
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index e4332e3e2d56..fa3f800d7d76 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -208,7 +208,8 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
if (sched_clock_timer.function != NULL) {
/* update timeout for clock wrap */
- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt,
+ HRTIMER_MODE_REL_HARD);
}
r = rate;
@@ -254,9 +255,9 @@ void __init generic_sched_clock_init(void)
* Start the timer to keep sched_clock() properly updated and
* sets the initial epoch.
*/
- hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
sched_clock_timer.function = sched_clock_poll;
- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}
/*
@@ -293,7 +294,7 @@ void sched_clock_resume(void)
struct clock_read_data *rd = &cd.read_data[0];
rd->epoch_cyc = cd.actual_read_sched_clock();
- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
rd->read_sched_clock = cd.actual_read_sched_clock;
}
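HRTIMER_MODE_REL_HARD keeps the callback in hard interrupt context even on PREEMPT_RT, where plain hrtimers are expired from softirq and could be deferred long enough for the underlying clock to wrap. Init and start modes must agree, hence all the call sites changing together; the generic pattern, with an illustrative callback:

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_poll(struct hrtimer *t)
    {
            /* Hard irq context even on PREEMPT_RT: keep it short,
             * no sleeping locks. */
            hrtimer_forward_now(t, ms_to_ktime(10));
            return HRTIMER_RESTART;
    }

    hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
    my_timer.function = my_poll;
    hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);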
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 856280d2cbd4..9ebaab13339d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1005,9 +1005,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
return -EOVERFLOW;
tmp *= mult;
- rem *= mult;
- do_div(rem, div);
+ rem = div64_u64(rem * mult, div);
*base = tmp + rem;
return 0;
}
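The underlying issue: do_div() truncates its divisor to 32 bits, so the removed sequence divided by only the low half of div whenever div exceeded 32 bits, while div64_u64() takes the full 64-bit divisor (overflow of rem * mult itself is already rejected by the check above). A runnable userspace demonstration of the truncation (plain C stand-ins, not the kernel helpers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t rem  = 10, mult = 1ULL << 34;
            uint64_t div  = (1ULL << 33) + 1;  /* wider than 32 bits */

            /* do_div()-like: divisor silently truncated, (u32)div == 1 */
            uint64_t truncated = (rem * mult) / (uint32_t)div;
            /* div64_u64()-like: full 64-bit divisor */
            uint64_t full      = (rem * mult) / div;

            printf("truncated=%llu full=%llu\n",
                   (unsigned long long)truncated, (unsigned long long)full);
            return 0;
    }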
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 568564ae3597..a5221abb4594 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1829,21 +1829,23 @@ static void process_timeout(struct timer_list *t)
* schedule_timeout - sleep until timeout
* @timeout: timeout value in jiffies
*
- * Make the current task sleep until @timeout jiffies have
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
+ * Make the current task sleep until @timeout jiffies have elapsed.
+ * The function behavior depends on the current task state
+ * (see also set_current_state() description):
*
- * You can set the task state as follows -
+ * %TASK_RUNNING - the scheduler is called, but the task does not sleep
+ * at all. That happens because sched_submit_work() does nothing for
+ * tasks in %TASK_RUNNING state.
*
* %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
* pass before the routine returns unless the current task is explicitly
- * woken up, (e.g. by wake_up_process())".
+ * woken up, (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task or the current task is explicitly woken
* up.
*
- * The current task state is guaranteed to be TASK_RUNNING when this
+ * The current task state is guaranteed to be %TASK_RUNNING when this
* routine returns.
*
* Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
@@ -1851,7 +1853,7 @@ static void process_timeout(struct timer_list *t)
* value will be %MAX_SCHEDULE_TIMEOUT.
*
* Returns 0 when the timer has expired otherwise the remaining time in
- * jiffies will be returned. In all cases the return value is guaranteed
+ * jiffies will be returned. In all cases the return value is guaranteed
* to be non-negative.
*/
signed long __sched schedule_timeout(signed long timeout)
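The documented contract in use: set the task state first, then sleep, then treat the return value as the unexpired remainder. A minimal sketch:

    signed long remaining;

    set_current_state(TASK_INTERRUPTIBLE);
    remaining = schedule_timeout(msecs_to_jiffies(100));
    /* remaining == 0: the full timeout elapsed.
     * remaining  > 0: woken early (signal or wake_up_process());
     *                 that many jiffies were left.
     * Either way the task state is TASK_RUNNING again here. */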
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 9577c89179cd..54ce6eb2ca36 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -71,13 +71,15 @@ void update_vsyscall(struct timekeeper *tk)
{
struct vdso_data *vdata = __arch_get_k_vdso_data();
struct vdso_timestamp *vdso_ts;
+ s32 clock_mode;
u64 nsec;
/* copy vsyscall data */
vdso_write_begin(vdata);
- vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
- vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
+ clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
+ vdata[CS_HRES_COARSE].clock_mode = clock_mode;
+ vdata[CS_RAW].clock_mode = clock_mode;
/* CLOCK_REALTIME also required for time() */
vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
@@ -103,10 +105,10 @@ void update_vsyscall(struct timekeeper *tk)
WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
/*
- * Architectures can opt out of updating the high resolution part
- * of the VDSO.
+ * If the current clocksource is not VDSO capable, then spare the
+ * update of the high resolution parts.
*/
- if (__arch_update_vdso_data())
+ if (clock_mode != VDSO_CLOCKMODE_NONE)
update_vdso_data(vdata, tk);
__arch_update_vsyscall(vdata, tk);