Diffstat (limited to 'arch')
-rw-r--r--   arch/avr32/kernel/process.c                |  2
-rw-r--r--   arch/cris/include/arch-v10/arch/bitops.h   |  2
-rw-r--r--   arch/ia64/kernel/head.S                    |  2
-rw-r--r--   arch/mips/kernel/mips-mt-fpaff.c           |  4
-rw-r--r--   arch/mips/kernel/scall32-o32.S             |  5
-rw-r--r--   arch/powerpc/include/asm/mmu_context.h     |  2
-rw-r--r--   arch/tile/include/asm/processor.h          |  2
-rw-r--r--   arch/tile/kernel/stack.c                   |  2
-rw-r--r--   arch/um/kernel/sysrq.c                     |  2
9 files changed, 12 insertions, 11 deletions
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index e7b61494c312..c2731003edef 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -341,7 +341,7 @@ unsigned long get_wchan(struct task_struct *p)
  * is actually quite ugly. It might be possible to
  * determine the frame size automatically at build
  * time by doing this:
- *   - compile sched.c
+ *   - compile sched/core.c
  *   - disassemble the resulting sched.o
  *   - look for 'sub sp,??' shortly after '<schedule>:'
  */
diff --git a/arch/cris/include/arch-v10/arch/bitops.h b/arch/cris/include/arch-v10/arch/bitops.h
index be85f6de25d3..03d9cfd92c8a 100644
--- a/arch/cris/include/arch-v10/arch/bitops.h
+++ b/arch/cris/include/arch-v10/arch/bitops.h
@@ -17,7 +17,7 @@ static inline unsigned long cris_swapnwbrlz(unsigned long w)
    in another register:
    !  __asm__ ("swapnwbr %2\n\tlz %2,%0"
    !          : "=r,r" (res), "=r,X" (dummy) : "1,0" (w));
-   confuses gcc (sched.c, gcc from cris-dist-1.14).  */
+   confuses gcc (core.c, gcc from cris-dist-1.14).  */
 	unsigned long res;
 	__asm__ ("swapnwbr %0 \n\t"
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d3..991ca336b8a2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1035,7 +1035,7 @@ END(ia64_delay_loop)
  * Return a CPU-local timestamp in nano-seconds. This timestamp is
  * NOT synchronized across CPUs its return value must never be
  * compared against the values returned on another CPU. The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
  *
  * The return-value of sched_clock() is NOT supposed to wrap-around.
  * If it did, it would cause some scheduling hiccups (at the worst).
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index fd814e08c945..cb098628aee8 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -27,12 +27,12 @@ unsigned long mt_fpemul_threshold;
  * FPU affinity with the user's requested processor affinity.
  * This code is 98% identical with the sys_sched_setaffinity()
  * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
+ * updated when kernel/sched/core.c changes.
  */

 /*
  * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
  * cloned here.
  */
 static inline struct task_struct *find_process_by_pid(pid_t pid)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9b36424b03c5..e9127ec612ef 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -476,8 +476,9 @@ einval: li	v0, -ENOSYS
 	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
-	 * in kernel/sched.c. Considered only temporary we only support these
-	 * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+	 * in kernel/sched/core.c. Considered only temporary we only support
+	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
+	 * atm.
	 */
	sys	mipsmt_sys_sched_setaffinity	3
	sys	mipsmt_sys_sched_getaffinity	3
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index a73668a5f30d..b467530e2485 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,7 +38,7 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
 /*
  * switch_mm is the entry point called from the architecture independent
- * code in kernel/sched.c
+ * code in kernel/sched/core.c
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 2b70dfb1442e..b3f104953da2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -225,7 +225,7 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
 /*
  * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched.c, so don't work too hard.
+ * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
  */
 #define thread_saved_pc(t)	((t)->thread.pc)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ed258b8ae320..af8dfc9665f6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -442,7 +442,7 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
			regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
-/* This is called only from kernel/sched.c, with esp == NULL */
+/* This is called only from kernel/sched/core.c, with esp == NULL */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
	struct KBacktraceIterator kbt;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 7d101a2a1541..0dc4d1c6f98a 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -39,7 +39,7 @@ void show_trace(struct task_struct *task, unsigned long * stack)
 static const int kstack_depth_to_print = 24;
 /* This recently started being used in arch-independent code too, as in
- * kernel/sched.c.*/
+ * kernel/sched/core.c.*/
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
	unsigned long *stack;