Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/crash_dump.c | 32
-rw-r--r--  arch/ia64/kernel/kprobes.c    | 64
-rw-r--r--  arch/ia64/kernel/mca.c        |  1
-rw-r--r--  arch/ia64/kernel/palinfo.c    |  2
-rw-r--r--  arch/ia64/kernel/process.c    | 19
-rw-r--r--  arch/ia64/kernel/ptrace.c     | 59
-rw-r--r--  arch/ia64/kernel/setup.c      |  2
-rw-r--r--  arch/ia64/kernel/smpboot.c    |  4
-rw-r--r--  arch/ia64/kernel/traps.c      |  2
-rw-r--r--  arch/ia64/kernel/uncached.c   |  2
10 files changed, 52 insertions, 135 deletions
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 0ed3c3dee4cd..4ef68e2aa757 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -10,42 +10,18 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/crash_dump.h>
-
+#include <linux/uio.h>
 #include <asm/page.h>
-#include <linux/uaccess.h>
 
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- *	space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- *	otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- *
- * Calling copy_to_user() in atomic context is not desirable. Hence first
- * copying the data to a pre-allocated kernel page and then copying to user
- * space in non-atomic context.
- */
-ssize_t
-copy_oldmem_page(unsigned long pfn, char *buf,
-	size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+		size_t csize, unsigned long offset)
 {
 	void *vaddr;
 
 	if (!csize)
 		return 0;
 	vaddr = __va(pfn<<PAGE_SHIFT);
-	if (userbuf) {
-		if (copy_to_user(buf, (vaddr + offset), csize)) {
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, (vaddr + offset), csize);
+	csize = copy_to_iter(vaddr + offset, csize, iter);
 	return csize;
 }
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 1a7bab1c5d7c..ca34e51e84b4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -29,38 +29,38 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
 enum instruction_type {A, I, M, F, B, L, X, u};
 static enum instruction_type bundle_encoding[32][3] = {
-	{ M, I, I },	/* 00 */
-	{ M, I, I },	/* 01 */
-	{ M, I, I },	/* 02 */
-	{ M, I, I },	/* 03 */
-	{ M, L, X },	/* 04 */
-	{ M, L, X },	/* 05 */
-	{ u, u, u },	/* 06 */
-	{ u, u, u },	/* 07 */
-	{ M, M, I },	/* 08 */
-	{ M, M, I },	/* 09 */
-	{ M, M, I },	/* 0A */
-	{ M, M, I },	/* 0B */
-	{ M, F, I },	/* 0C */
-	{ M, F, I },	/* 0D */
-	{ M, M, F },	/* 0E */
-	{ M, M, F },	/* 0F */
-	{ M, I, B },	/* 10 */
-	{ M, I, B },	/* 11 */
-	{ M, B, B },	/* 12 */
-	{ M, B, B },	/* 13 */
-	{ u, u, u },	/* 14 */
-	{ u, u, u },	/* 15 */
-	{ B, B, B },	/* 16 */
-	{ B, B, B },	/* 17 */
-	{ M, M, B },	/* 18 */
-	{ M, M, B },	/* 19 */
-	{ u, u, u },	/* 1A */
-	{ u, u, u },	/* 1B */
-	{ M, F, B },	/* 1C */
-	{ M, F, B },	/* 1D */
-	{ u, u, u },	/* 1E */
-	{ u, u, u },	/* 1F */
+	[0x00] = { M, I, I },
+	[0x01] = { M, I, I },
+	[0x02] = { M, I, I },
+	[0x03] = { M, I, I },
+	[0x04] = { M, L, X },
+	[0x05] = { M, L, X },
+	[0x06] = { u, u, u },
+	[0x07] = { u, u, u },
+	[0x08] = { M, M, I },
+	[0x09] = { M, M, I },
+	[0x0A] = { M, M, I },
+	[0x0B] = { M, M, I },
+	[0x0C] = { M, F, I },
+	[0x0D] = { M, F, I },
+	[0x0E] = { M, M, F },
+	[0x0F] = { M, M, F },
+	[0x10] = { M, I, B },
+	[0x11] = { M, I, B },
+	[0x12] = { M, B, B },
+	[0x13] = { M, B, B },
+	[0x14] = { u, u, u },
+	[0x15] = { u, u, u },
+	[0x16] = { B, B, B },
+	[0x17] = { B, B, B },
+	[0x18] = { M, M, B },
+	[0x19] = { M, M, B },
+	[0x1A] = { u, u, u },
+	[0x1B] = { u, u, u },
+	[0x1C] = { M, F, B },
+	[0x1D] = { M, F, B },
+	[0x1E] = { u, u, u },
+	[0x1F] = { u, u, u },
 };
 
 /* Insert a long branch code */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e628a88607bb..c62a66710ad6 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -290,7 +290,6 @@ static void ia64_mlogbuf_finish(int wait)
 {
 	BREAK_LOGLEVEL(console_loglevel);
 
-	spin_lock_init(&mlogbuf_rlock);
 	ia64_mlogbuf_dump();
 	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
 		"MCA/INIT might be dodgy or fail.\n");
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 64189f04c1a4..b9ae093bfe37 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -120,7 +120,7 @@ static const char *mem_attrib[]={
  * Input:
  *	- a pointer to a buffer to hold the string
  *	- a 64-bit vector
- * Ouput:
+ * Output:
  *	- a pointer to the end of the buffer
  *
  */
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index d7a256bd9d6b..416305e550e2 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/personality.h>
+#include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/hotplug.h>
@@ -295,9 +296,12 @@ ia64_load_extra (struct task_struct *task)
  * so there is nothing to worry about.
  */
 int
-copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
-	    unsigned long user_stack_size, struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+	unsigned long clone_flags = args->flags;
+	unsigned long user_stack_base = args->stack;
+	unsigned long user_stack_size = args->stack_size;
+	unsigned long tls = args->tls;
 	extern char ia64_ret_from_clone;
 	struct switch_stack *child_stack, *stack;
 	unsigned long rbs, child_rbs, rbs_size;
@@ -338,14 +342,14 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
 
 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
 
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
-		if (unlikely(!user_stack_base)) {
+	if (unlikely(args->fn)) {
+		if (unlikely(args->idle)) {
 			/* fork_idle() called us */
 			return 0;
 		}
 		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
-		child_stack->r4 = user_stack_base;	/* payload */
-		child_stack->r5 = user_stack_size;	/* argument */
+		child_stack->r4 = (unsigned long) args->fn;
+		child_stack->r5 = (unsigned long) args->fn_arg;
 		/*
 		 * Preserve PSR bits, except for bits 32-34 and 37-45,
 		 * which we can't read.
@@ -599,8 +603,7 @@ machine_halt (void)
 void
 machine_power_off (void)
 {
-	if (pm_power_off)
-		pm_power_off();
+	do_kernel_power_off();
 	machine_halt();
 }
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index a19acd9f5e1f..ab8aeb34d1d9 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -618,63 +618,6 @@ void ia64_sync_krbs(void)
 }
 
 /*
- * After PTRACE_ATTACH, a thread's register backing store area in user
- * space is assumed to contain correct data whenever the thread is
- * stopped. arch_ptrace_stop takes care of this on tracing stops.
- * But if the child was already stopped for job control when we attach
- * to it, then it might not ever get into ptrace_stop by the time we
- * want to examine the user memory containing the RBS.
- */
-void
-ptrace_attach_sync_user_rbs (struct task_struct *child)
-{
-	int stopped = 0;
-	struct unw_frame_info info;
-
-	/*
-	 * If the child is in TASK_STOPPED, we need to change that to
-	 * TASK_TRACED momentarily while we operate on it. This ensures
-	 * that the child won't be woken up and return to user mode while
-	 * we are doing the sync. (It can only be woken up for SIGKILL.)
-	 */
-
-	read_lock(&tasklist_lock);
-	if (child->sighand) {
-		spin_lock_irq(&child->sighand->siglock);
-		if (READ_ONCE(child->__state) == TASK_STOPPED &&
-		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
-			set_notify_resume(child);
-
-			WRITE_ONCE(child->__state, TASK_TRACED);
-			stopped = 1;
-		}
-		spin_unlock_irq(&child->sighand->siglock);
-	}
-	read_unlock(&tasklist_lock);
-
-	if (!stopped)
-		return;
-
-	unw_init_from_blocked_task(&info, child);
-	do_sync_rbs(&info, ia64_sync_user_rbs);
-
-	/*
-	 * Now move the child back into TASK_STOPPED if it should be in a
-	 * job control stop, so that SIGCONT can be used to wake it up.
-	 */
-	read_lock(&tasklist_lock);
-	if (child->sighand) {
-		spin_lock_irq(&child->sighand->siglock);
-		if (READ_ONCE(child->__state) == TASK_TRACED &&
-		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
-			WRITE_ONCE(child->__state, TASK_STOPPED);
-		}
-		spin_unlock_irq(&child->sighand->siglock);
-	}
-	read_unlock(&tasklist_lock);
-}
-
-/*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
 inline void
@@ -2025,7 +1968,7 @@ static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
 	 *	- epsinstruction: cfm is set by br.call
 	 *	  locals don't exist.
 	 *
-	 * For both cases argguments are reachable in cfm.sof - cfm.sol.
+	 * For both cases arguments are reachable in cfm.sof - cfm.sol.
 	 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
 	 */
 	cfm = pt->cr_ifs;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5010348fa21b..fd6301eafa9d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -572,7 +572,7 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
 #endif
-	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+	per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ?
 		32 : cpumask_weight(&early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 #endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d10f780c13b9..d0e935cf2093 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -576,8 +576,6 @@ clear_cpu_sibling_map(int cpu)
 static void
 remove_siblinginfo(int cpu)
 {
-	int last = 0;
-
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
 		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
@@ -585,8 +583,6 @@ remove_siblinginfo(int cpu)
 		return;
 	}
 
-	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
-
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 753642366e12..53735b1d1be3 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -309,7 +309,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 	/*
 	 * Lower 4 bits are used as a count. Upper bits are a sequence
 	 * number that is updated when count is reset. The cmpxchg will
-	 * fail is seqno has changed. This minimizes mutiple cpus
+	 * fail is seqno has changed. This minimizes multiple cpus
 	 * resetting the count.
 	 */
 	if (current_jiffies > last.time)
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 816803636a75..a0fec82c56b8 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -261,7 +261,7 @@ static int __init uncached_init(void)
 {
 	int nid;
 
-	for_each_node_state(nid, N_ONLINE) {
+	for_each_online_node(nid) {
 		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
 		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
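Note on the crash_dump.c change above: copy_oldmem_page() now takes an iov_iter instead of a raw buffer plus a userbuf flag, so the user/kernel destination distinction is handled by copy_to_iter(). The sketch below is illustrative only and is not part of this diff; it assumes the v5.19-era iov_iter API (iov_iter_kvec() still taking a READ/WRITE direction), and the helper name read_oldmem_into_kbuf is hypothetical. The in-tree caller of this interface is read_from_oldmem() in fs/proc/vmcore.c.

/*
 * Hypothetical caller sketch (not part of this diff): read csize bytes of
 * old-kernel memory into a kernel buffer through the new iov_iter-based
 * copy_oldmem_page().  Assumes a ~v5.19 kernel where iov_iter_kvec()
 * takes a READ/WRITE direction argument.
 */
#include <linux/crash_dump.h>
#include <linux/uio.h>

static ssize_t read_oldmem_into_kbuf(unsigned long pfn, void *kbuf,
				     size_t csize, unsigned long offset)
{
	struct kvec kvec = { .iov_base = kbuf, .iov_len = csize };
	struct iov_iter iter;

	/* READ: data flows *into* the buffers described by the iterator. */
	iov_iter_kvec(&iter, READ, &kvec, 1, csize);

	/* Returns the number of bytes copied out of the old kernel's page. */
	return copy_oldmem_page(&iter, pfn, csize, offset);
}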