author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-28 22:14:19 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-28 22:14:19 +0300 |
commit | 54a728dc5e4feb0a9278ad62b19f34ad21ed0ee4 (patch) | |
tree | 2737c23d4dbc6426d6d9467626a7634cbbb40fcd /arch | |
parent | 28a27cbd86076c1a6be311c751b421c4c17a7dd9 (diff) | |
parent | adf3c31e18b765ea24eba7b0c1efc076b8ee3d55 (diff) | |
download | linux-54a728dc5e4feb0a9278ad62b19f34ad21ed0ee4.tar.xz | |
Merge tag 'sched-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Changes to core scheduling facilities:
- Add "Core Scheduling" via CONFIG_SCHED_CORE=y, which enables
coordinated scheduling across SMT siblings. This is a much
requested feature for cloud computing platforms, to allow the
flexible utilization of SMT siblings, without exposing untrusted
domains to information leaks & side channels, plus to ensure more
deterministic computing performance on SMT systems used by
heterogeneous workloads.
There are new prctls to set core scheduling groups, which allow
more flexible management of workloads that can share siblings
(see the prctl() sketch after this list).
- Fix task->state access anti-patterns that may result in missed
wakeups, and rename the field to ->__state in the process to catch
new abuses (a schematic before/after model follows this list).
- Load-balancing changes:
- Tweak newidle_balance for fair-sched, to improve 'memcache'-like
workloads.
- "Age" (decay) average idle time, to better track & improve
workloads such as 'tbench'.
- Fix & improve energy-aware (EAS) balancing logic & metrics.
- Fix & improve the uclamp metrics.
- Fix task migration (taskset) corner case on !CONFIG_CPUSET.
- Fix RT and deadline utilization tracking across policy changes.
- Introduce a "burstable" CFS controller via cgroups, which allows
bursty CPU-bound workloads to borrow a bit against their future
quota to improve overall latencies & batching. Can be tweaked via
/sys/fs/cgroup/cpu/<X>/cpu.cfs_burst_us (see the configuration
sketch after this list).
- Rework asymmetric topology/capacity detection & handling.
- Scheduler statistics & tooling:
- Disable delayacct by default, but add a sysctl to enable it at
runtime if tooling needs it (see the sketch after this list). Use
static keys and other optimizations to make it more palatable.
- Use sched_clock() in delayacct, instead of ktime_get_ns().
- Misc cleanups and fixes.
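The new core-scheduling interface is driven through prctl(). Below is a minimal sketch, not taken from the pull itself: it assumes a kernel with CONFIG_SCHED_CORE=y, defines the PR_SCHED_CORE constants locally in case the installed <linux/prctl.h> predates them, and uses the calling thread group as the (hypothetical) workload to tag.

```c
/* Sketch: give the calling thread group a fresh core-scheduling cookie,
 * so only tasks sharing that cookie may run on our SMT siblings.
 * Constants mirror the uapi additions of this cycle; defined locally in
 * case the installed headers predate them. */
#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE			62
#define PR_SCHED_CORE_GET		0
#define PR_SCHED_CORE_CREATE		1	/* create unique cookie */
#define PR_SCHED_CORE_SHARE_TO		2	/* push cookie to pid */
#define PR_SCHED_CORE_SHARE_FROM	3	/* pull cookie from pid */
#endif

#define PIDTYPE_TGID	1	/* scope argument: whole thread group */

int main(void)
{
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, getpid(),
		  PIDTYPE_TGID, 0)) {
		perror("PR_SCHED_CORE_CREATE");
		return 1;
	}
	printf("core-sched cookie created for thread group %d\n", getpid());
	return 0;
}
```

Another process can then be pulled into the same group with PR_SCHED_CORE_SHARE_TO / PR_SCHED_CORE_SHARE_FROM, which is what makes the sibling-sharing management flexible.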
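The ->state to ->__state conversion is easiest to see as a before/after pattern. Here is a schematic userspace model of the new task_is_running() helper, mirroring the arch diffs further down; the simplified READ_ONCE() and task_struct are stand-ins for the kernel's definitions, so treat this as a sketch rather than kernel code.

```c
/* Schematic model of the task->__state conversion; NOT kernel code.
 * The kernel's task_is_running(tsk) compares READ_ONCE((tsk)->__state)
 * against TASK_RUNNING, replacing open-coded (torn-read-prone)
 * p->state == TASK_RUNNING tests. */
#include <stdio.h>

#define TASK_RUNNING 0x0000
/* Simplified stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct task_struct { unsigned int __state; };

static int task_is_running(struct task_struct *p)
{
	return READ_ONCE(p->__state) == TASK_RUNNING;
}

int main(void)
{
	struct task_struct t = { .__state = TASK_RUNNING };

	/* Old anti-pattern: if (p->state == TASK_RUNNING) ...
	 * New form, as in the get_wchan() hunks below: */
	printf("running: %d\n", task_is_running(&t));
	return 0;
}
```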
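For the burstable CFS controller, this is a sketch of setting the knob from a program; the cgroup name "example" is hypothetical (the message only gives the /sys/fs/cgroup/cpu/<X>/ template), and the values are illustrative: a 100ms quota per 100ms period, with up to 50ms of accumulated burst on top.

```c
/* Sketch: configure the burstable CFS controller for one cgroup.
 * The cgroup name "example" is hypothetical; quota/period/burst values
 * are illustrative (burst may not exceed quota). Requires root. */
#include <stdio.h>

static int write_val(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/fs/cgroup/cpu/example";
	char path[128];

	snprintf(path, sizeof(path), "%s/cpu.cfs_period_us", base);
	write_val(path, "100000");	/* 100ms period */
	snprintf(path, sizeof(path), "%s/cpu.cfs_quota_us", base);
	write_val(path, "100000");	/* 100ms quota per period */
	snprintf(path, sizeof(path), "%s/cpu.cfs_burst_us", base);
	write_val(path, "50000");	/* borrow up to 50ms extra */
	return 0;
}
```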
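And for delayacct, a sketch of flipping the new runtime switch; the /proc/sys/kernel/task_delayacct path is my reading of where the sysctl lands, so treat it as an assumption and verify against the final patch.

```c
/* Sketch: enable delay accounting at runtime for tooling that needs it.
 * ASSUMPTION: the new sysctl is exposed as /proc/sys/kernel/task_delayacct.
 * Requires root. 0 = off (the new default), 1 = on. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/task_delayacct";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);
	return fclose(f);
}
```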
* tag 'sched-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
sched/doc: Update the CPU capacity asymmetry bits
sched/topology: Rework CPU capacity asymmetry detection
sched/core: Introduce SD_ASYM_CPUCAPACITY_FULL sched_domain flag
psi: Fix race between psi_trigger_create/destroy
sched/fair: Introduce the burstable CFS controller
sched/uclamp: Fix uclamp_tg_restrict()
sched/rt: Fix Deadline utilization tracking during policy change
sched/rt: Fix RT utilization tracking during policy change
sched: Change task_struct::state
sched,arch: Remove unused TASK_STATE offsets
sched,timer: Use __set_current_state()
sched: Add get_current_state()
sched,perf,kvm: Fix preemption condition
sched: Introduce task_is_running()
sched: Unbreak wakeups
sched/fair: Age the average idle time
sched/cpufreq: Consider reduced CPU capacity in energy calculation
sched/fair: Take thermal pressure into account while estimating energy
thermal/cpufreq_cooling: Update offline CPUs per-cpu thermal_pressure
sched/fair: Return early from update_tg_cfs_load() if delta == 0
...
Diffstat (limited to 'arch')
54 files changed, 45 insertions, 79 deletions
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 5112ab996394..ef0c08ed0481 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -380,7 +380,7 @@ get_wchan(struct task_struct *p)
 {
 	unsigned long schedule_frame;
 	unsigned long pc;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 	/*
 	 * This one depends on the frame size of schedule(). Do a
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4dd9f3f3001..4b2575f936d4 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -166,7 +166,6 @@ smp_callin(void)
 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
 	      cpuid, current, current->active_mm));
 
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 52906d314537..db0e104d6835 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	local_irq_enable();
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f73da203b170..1b9576d21e24 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -83,7 +83,7 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
 		 * is safe-kept and BLINK at a well known location in there
 		 */
 
-		if (tsk->state == TASK_RUNNING)
+		if (task_is_running(tsk))
 			return -1;
 
 		frame_info->task = tsk;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6324f4db9b02..fc9e8b37eaa8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -288,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct stackframe frame;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	frame.fp = thread_saved_fp(p);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 74679240a9d8..c7bb168b0d97 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
 #endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 80e946b2abee..e83f0982b99c 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b4bb67f17a2c..14f3c19c6ad2 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -598,7 +598,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct stackframe frame;
 	unsigned long stack_page, ret = 0;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long)try_get_task_stack(p);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dcd7041b2b07..6671000a8b7d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -224,7 +224,6 @@ asmlinkage notrace void secondary_start_kernel(void)
 	init_gic_priority_masking();
 
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 3964acf5451e..a4eba0908bfa 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -20,8 +20,6 @@ if VIRTUALIZATION
 menuconfig KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
 	depends on OF
-	# for TASKSTATS/TASK_DELAY_ACCT:
-	depends on NET && MULTIUSER
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -38,8 +36,7 @@ menuconfig KVM
 	select IRQ_BYPASS_MANAGER
 	select HAVE_KVM_IRQ_BYPASS
 	select HAVE_KVM_VCPU_RUN_PID_CHANGE
-	select TASKSTATS
-	select TASK_DELAY_ACCT
+	select SCHED_INFO
 	help
 	  Support hosting virtualized guest machines.
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 17479860d43d..1cbcba4b0dd1 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -9,7 +9,6 @@ int main(void)
 {
 	/* offsets into the task struct */
-	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
 	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 0f9f5eef9338..e2993539af8e 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
 	pr_info("CPU%u Online: %s...\n", cpu, __func__);
 
 	local_irq_enable();
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 16ae20a0af34..1b280ef08004 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -115,7 +115,7 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && task->state != TASK_RUNNING))
+	if (likely(task && task != current && !task_is_running(task)))
 		walk_stackframe(task, NULL, save_wchan, &pc);
 	return pc;
 }
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index d4b53af657c8..65571ee15132 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -21,7 +21,6 @@ int main(void)
 {
 	/* offsets into the task struct */
-	OFFSET(TASK_STATE, task_struct, state);
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_PTRACE, task_struct, ptrace);
 	OFFSET(TASK_BLOCKED, task_struct, blocked);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 46b1342ce515..2ac27e4248a4 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -134,7 +134,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long)p;
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index c61165c99ae0..6a6835fb4242 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -135,7 +135,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cdbac4b52f30..e628a88607bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1788,7 +1788,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	ti->task = p;
 	ti->cpu = cpu;
 	p->stack = ti;
-	p->state = TASK_UNINTERRUPTIBLE;
+	p->__state = TASK_UNINTERRUPTIBLE;
 	cpumask_set_cpu(cpu, &p->cpus_mask);
 	INIT_LIST_HEAD(&p->tasks);
 	p->parent = p->real_parent = p->group_leader = p;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7e1a1525e202..e56d63f4abf9 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -529,7 +529,7 @@ get_wchan (struct task_struct *p)
 	unsigned long ip;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	/*
@@ -542,7 +542,7 @@ get_wchan (struct task_struct *p)
 	 */
 	unw_init_from_blocked_task(&info, p);
 	do {
-		if (p->state == TASK_RUNNING)
+		if (task_is_running(p))
 			return 0;
 		if (unw_unwind(&info) < 0)
 			return 0;
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index e14f5653393a..df28c7dd164f 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -641,11 +641,11 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	read_lock(&tasklist_lock);
 	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED &&
+		if (READ_ONCE(child->__state) == TASK_STOPPED &&
 		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
 			set_notify_resume(child);
 
-			child->state = TASK_TRACED;
+			WRITE_ONCE(child->__state, TASK_TRACED);
 			stopped = 1;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
@@ -665,9 +665,9 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	read_lock(&tasklist_lock);
 	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_TRACED &&
+		if (READ_ONCE(child->__state) == TASK_TRACED &&
 		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
-			child->state = TASK_STOPPED;
+			WRITE_ONCE(child->__state, TASK_STOPPED);
 		}
 		spin_unlock_irq(&child->sighand->siglock);
 	}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 49b488580939..d10f780c13b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -441,7 +441,6 @@ start_secondary (void *unused)
 #endif
 	efi_map_pal_code();
 	cpu_init();
-	preempt_disable();
 	smp_callin();
 
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index da83cc83e791..db49f9091711 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -268,7 +268,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 6c69ce7be2e8..b77dd188dec4 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -70,7 +70,6 @@ int main(int argc, char *argv[])
 
 	/* struct task_struct */
 	DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
-	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
 	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5735b2cd6f2a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
 void output_task_defines(void)
 {
 	COMMENT("MIPS task_struct offsets.");
-	OFFSET(TASK_STATE, task_struct, state);
 	OFFSET(TASK_THREAD_INFO, task_struct, stack);
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bff080db0294..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif
 
-	if (!task || task == current || task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 	if (!task_stack_page(task))
 		goto out;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
 	 */
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index c1327e552ec6..391895b54d13 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -239,7 +239,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_start, stack_end;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index c5f916ca6845..9ff37ba2bb60 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -223,7 +223,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long)p;
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 18c703d1d761..710651d5aaae 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -37,7 +37,6 @@ int main(void)
 {
 	/* offsets into the task_struct */
-	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
 	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 48e1092a64de..415e209732a3 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 
 	local_irq_enable();
-
-	preempt_disable();
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index cd2cc1b1648c..33113ba24054 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -42,7 +42,6 @@ int main(void)
 {
 	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
-	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
 	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
 	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b144fbe29bc1..184ec3c1eae4 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -249,7 +249,7 @@ get_wchan(struct task_struct *p)
 	unsigned long ip;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	/*
@@ -260,7 +260,7 @@ get_wchan(struct task_struct *p)
 	do {
 		if (unwind_once(&info) < 0)
 			return 0;
-		if (p->state == TASK_RUNNING)
+		if (task_is_running(p))
 			return 0;
 		ip = info.ip;
 		if (!in_sched_functions(ip))
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 10227f667c8a..1405b603b91b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
 #endif
 
 	smp_cpu_init(slave_id);
-	preempt_disable();
 
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 89e34aa273e2..8935c5696bce 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2084,7 +2084,7 @@ static unsigned long __get_wchan(struct task_struct *p)
 	unsigned long ip, sp;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	sp = p->thread.ksp;
@@ -2094,7 +2094,7 @@ static unsigned long __get_wchan(struct task_struct *p)
 	do {
 		sp = *(unsigned long *)sp;
 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
-		    p->state == TASK_RUNNING)
+		    task_is_running(p))
 			return 0;
 		if (count > 0) {
 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2e05c783440a..6c6e4d934d86 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1547,7 +1547,6 @@ void start_secondary(void *unused)
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	cpu_callin_map[cpu] = 1;
 
 	if (smp_ops->setup_cpu)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8173e92f19d..84de2d7c2f40 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3162,6 +3162,7 @@ memzcan(void)
 
 static void show_task(struct task_struct *tsk)
 {
+	unsigned int p_state = READ_ONCE(tsk->__state);
 	char state;
 
 	/*
@@ -3169,14 +3170,14 @@ static void show_task(struct task_struct *tsk)
 	 * appropriate for calling from xmon. This could be moved
 	 * to a common, generic, routine used by both.
 	 */
-	state = (tsk->state == 0) ? 'R' :
-		(tsk->state < 0) ? 'U' :
-		(tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
-		(tsk->state & TASK_STOPPED) ? 'T' :
-		(tsk->state & TASK_TRACED) ? 'C' :
+	state = (p_state == 0) ? 'R' :
+		(p_state < 0) ? 'U' :
+		(p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+		(p_state & TASK_STOPPED) ? 'T' :
+		(p_state & TASK_TRACED) ? 'C' :
 		(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
 		(tsk->exit_state & EXIT_DEAD) ? 'E' :
-		(tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+		(p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
 
 	printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
 		tsk->thread.ksp, tsk->thread.regs,
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 9a408e2942ac..bd82375db51a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
 	 * Disable preemption before enabling interrupts, so we don't try to
 	 * schedule a CPU that hasn't actually started yet.
 	 */
-	preempt_disable();
 	local_irq_enable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index bde85fc53357..ff467b98c3e3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -132,7 +132,7 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && task->state != TASK_RUNNING))
+	if (likely(task && task != current && !task_is_running(task)))
 		walk_stackframe(task, NULL, save_wchan, &pc);
 	return pc;
 }
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index b49e0492842c..23ff51be7e29 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -32,7 +32,7 @@ static inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p)	do { } while (0)
 
 #define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)
@@ -91,7 +91,7 @@ static inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p)	do { } while (0)
 
 #define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e20bed1ed34a..7ae5dde9c54d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -180,7 +180,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct unwind_state state;
 	unsigned long ip = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+	if (!p || p == current || task_is_running(p) || !task_stack_page(p))
 		return 0;
 
 	if (!try_get_task_stack(p))
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2fec2b80d35d..111909aeb8d2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -878,7 +878,6 @@ static void smp_init_secondary(void)
 	restore_access_regs(S390_lowcore.access_regs_save_area);
 	cpu_init();
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	init_cpu_timer();
 	vtime_init();
 	vdso_getcpu_init();
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 826d01777361..8ae3dc5783fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -702,7 +702,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 			 * interrupt since it must be a leftover of a PFAULT
 			 * CANCEL operation which didn't remove all pending
 			 * completion interrupts.
 			 */
-			if (tsk->state == TASK_RUNNING)
+			if (task_is_running(tsk))
 				tsk->thread.pfault_wait = -1;
 		}
 	} else {
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 1aa508eb0823..717de05c81f4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -186,7 +186,7 @@ unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long pc;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	/*
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 372acdc9033e..65924d9ec245 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
 
 	per_cpu_trap_init();
 
-	preempt_disable();
-
 	notify_cpu_starting(cpu);
 
 	local_irq_enable();
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 3b9794978e5b..93983d6d431d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -376,8 +376,7 @@ unsigned long get_wchan(struct task_struct *task)
 	struct reg_window32 *rw;
 	int count = 0;
 
-	if (!task || task == current ||
-	    task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 
 	fp = task_thread_info(task)->ksp + bias;
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 7afd0a859a78..d33c58a58d4f 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -674,8 +674,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ret = 0;
 	int count = 0;
 
-	if (!task || task == current ||
-	    task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 
 	tp = task_thread_info(task);
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 50c127ab46d5..22b148e5a5f8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
 	 */
 	arch_cpu_pre_starting(arg);
-	preempt_disable();
 	cpu = smp_processor_id();
 
 	notify_cpu_starting(cpu);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e38d8bf454e8..ae5faa1d989d 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -138,9 +138,6 @@ void smp_callin(void)
 
 	set_cpu_online(cpuid, true);
 
-	/* idle thread is expected to have preempt disabled */
-	preempt_disable();
-
 	local_irq_enable();
 
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5011064b5dd..457a38db368b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -369,7 +369,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page, sp, ip;
 	bool seen_sched = 0;
 
-	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+	if ((p == NULL) || (p == current) || task_is_running(p))
 		return 0;
 
 	stack_page = (unsigned long) task_stack_page(p);
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index f8cb8af4de5c..fe5efbcba824 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5e1f38179f49..e52b208b4641 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
 	int count = 0;
 
-	if (p == current || p->state == TASK_RUNNING)
+	if (p == current || task_is_running(p))
 		return 0;
 
 	if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_struct *p)
 			goto out;
 		}
 		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
-	} while (count++ < 16 && p->state != TASK_RUNNING);
+	} while (count++ < 16 && !task_is_running(p));
 
 out:
 	put_task_stack(p);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7770245cc7fa..ec2d64aa2163 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -236,7 +236,6 @@ static void notrace start_secondary(void *unused)
 	cpu_init();
 	rcu_cpu_starting(raw_smp_processor_id());
 	x86_cpuinit.early_percpu_clock_init();
-	preempt_disable();
 	smp_callin();
 
 	enable_start_cpu0 = 0;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f6b93a35ce14..fb8efb387aff 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,8 +22,6 @@ config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM
 	depends on HIGH_RES_TIMERS
-	# for TASKSTATS/TASK_DELAY_ACCT:
-	depends on NET && MULTIUSER
 	depends on X86_LOCAL_APIC
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
@@ -36,8 +34,7 @@ config KVM
 	select KVM_ASYNC_PF
 	select USER_RETURN_NOTIFIER
 	select KVM_MMIO
-	select TASKSTATS
-	select TASK_DELAY_ACCT
+	select SCHED_INFO
 	select PERF_EVENTS
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9534ef515d74..060165340612 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -304,7 +304,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page = (unsigned long) task_stack_page(p);
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 
 	sp = p->thread.sp;
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index cd85a7a2722b..1254da07ead1 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
 
-	preempt_disable();
 	trace_hardirqs_off();
 	calibrate_delay();