Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      |   1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c        |  12
-rw-r--r--  arch/ia64/include/asm/elf.h            |   2
-rw-r--r--  arch/ia64/include/asm/pgalloc.h        |  24
-rw-r--r--  arch/ia64/include/asm/tlb.h            |   1
-rw-r--r--  arch/ia64/kernel/entry.S               |  32
-rw-r--r--  arch/ia64/kernel/process.c             |  66
-rw-r--r--  arch/ia64/kernel/ptrace.c              | 396
-rw-r--r--  arch/ia64/kernel/smp.c                 |   1
-rw-r--r--  arch/ia64/kernel/smpboot.c             |   1
-rw-r--r--  arch/ia64/kernel/syscalls/syscall.tbl  |   1
-rw-r--r--  arch/ia64/mm/contig.c                  |   1
-rw-r--r--  arch/ia64/mm/discontig.c               |   6
-rw-r--r--  arch/ia64/mm/hugetlbpage.c             |   1
-rw-r--r--  arch/ia64/mm/tlb.c                     |   3
15 files changed, 190 insertions, 358 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 1fa2fe2ef053..5b4ec80bf586 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -192,6 +192,7 @@ config IA64_SGI_UV config IA64_HP_SBA_IOMMU bool "HP SBA IOMMU support" + select DMA_OPS default y help Say Y here to add support for the SBA IOMMU found on HP zx1 and diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a806227c1fad..656a4888c300 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -907,7 +907,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dir: dma direction * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long poff, size_t size, @@ -1028,7 +1028,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -1105,7 +1105,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, @@ -1162,7 +1162,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) @@ -1425,7 +1425,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, @@ -1524,7 +1524,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. 
* @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index c70bb9c11f52..6629301a2620 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -179,8 +179,6 @@ extern void ia64_init_addr_space (void); #define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t)) #define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t)) -typedef unsigned long elf_fpxregset_t; - typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[ELF_NGREG]; diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 2a3050345099..9601cfe83c94 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -29,11 +29,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - #if CONFIG_PGTABLE_LEVELS == 4 static inline void p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud) @@ -41,15 +36,6 @@ p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud) p4d_val(*p4d_entry) = __pa(pud); } -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} - -static inline void pud_free(struct mm_struct *mm, pud_t *pud) -{ - free_page((unsigned long)pud); -} #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) #endif /* CONFIG_PGTABLE_LEVELS == 4 */ @@ -59,16 +45,6 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_val(*pud_entry) = __pa(pmd); } -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) static inline void diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index f1f257d632b3..8d9da6f08a62 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -42,7 +42,6 @@ #include <linux/pagemap.h> #include <linux/swap.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/tlbflush.h> diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index c5efac285bc3..e98e3dafffd8 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -112,19 +112,16 @@ GLOBAL_ENTRY(sys_clone2) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK - adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp mov loc0=rp - mov loc1=r16 // save ar.pfs across do_fork + mov loc1=r16 // save ar.pfs across ia64_clone .body + mov out0=in0 mov out1=in1 mov out2=in2 - tbit.nz p6,p0=in0,CLONE_SETTLS_BIT - mov out3=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID - ;; -(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread() - mov out4=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID - mov out0=in0 // out0 = clone_flags - br.call.sptk.many rp=do_fork + mov out3=in3 + mov out4=in4 + mov out5=in5 + br.call.sptk.many rp=ia64_clone .ret1: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 @@ -143,19 
+140,16 @@ GLOBAL_ENTRY(sys_clone) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK - adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp mov loc0=rp - mov loc1=r16 // save ar.pfs across do_fork + mov loc1=r16 // save ar.pfs across ia64_clone .body + mov out0=in0 mov out1=in1 mov out2=16 // stacksize (compensates for 16-byte scratch area) - tbit.nz p6,p0=in0,CLONE_SETTLS_BIT - mov out3=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID - ;; -(p6) st8 [r2]=in4 // store TLS in r13 (tp) - mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID - mov out0=in0 // out0 = clone_flags - br.call.sptk.many rp=do_fork + mov out3=in3 + mov out4=in4 + mov out5=in5 + br.call.sptk.many rp=ia64_clone .ret2: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 @@ -590,7 +584,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone) nop.i 0 /* * We need to call schedule_tail() to complete the scheduling process. - * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the + * Called by ia64_switch_to() after ia64_clone()->copy_thread(). r8 contains the * address of the previously executing task. */ br.call.sptk.many rp=ia64_invoke_schedule_tail diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 4562a1aed454..f19cb97c0098 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -40,7 +40,6 @@ #include <asm/elf.h> #include <asm/irq.h> #include <asm/kexec.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/switch_to.h> @@ -297,7 +296,7 @@ ia64_load_extra (struct task_struct *task) pfm_load_regs(task); info = __this_cpu_read(pfm_syst_info); - if (info & PFM_CPUINFO_SYST_WIDE) + if (info & PFM_CPUINFO_SYST_WIDE) pfm_syst_wide_update_task(task, info, 1); #endif } @@ -311,7 +310,7 @@ ia64_load_extra (struct task_struct *task) * * <clone syscall> <some kernel call frames> * sys_clone : - * do_fork do_fork + * _do_fork _do_fork * copy_thread copy_thread * * This means that the stack layout is as follows: @@ -334,9 +333,8 @@ ia64_load_extra (struct task_struct *task) * so there is nothing to worry about. 
*/ int -copy_thread(unsigned long clone_flags, - unsigned long user_stack_base, unsigned long user_stack_size, - struct task_struct *p) +copy_thread(unsigned long clone_flags, unsigned long user_stack_base, + unsigned long user_stack_size, struct task_struct *p, unsigned long tls) { extern char ia64_ret_from_clone; struct switch_stack *child_stack, *stack; @@ -417,7 +415,7 @@ copy_thread(unsigned long clone_flags, rbs_size = stack->ar_bspstore - rbs; memcpy((void *) child_rbs, (void *) rbs, rbs_size); if (clone_flags & CLONE_SETTLS) - child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ + child_ptregs->r13 = tls; if (user_stack_base) { child_ptregs->r12 = user_stack_base + user_stack_size - 16; child_ptregs->ar_bspstore = user_stack_base; @@ -442,11 +440,29 @@ copy_thread(unsigned long clone_flags, return retval; } +asmlinkage long ia64_clone(unsigned long clone_flags, unsigned long stack_start, + unsigned long stack_size, unsigned long parent_tidptr, + unsigned long child_tidptr, unsigned long tls) +{ + struct kernel_clone_args args = { + .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), + .pidfd = (int __user *)parent_tidptr, + .child_tid = (int __user *)child_tidptr, + .parent_tid = (int __user *)parent_tidptr, + .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), + .stack = stack_start, + .stack_size = stack_size, + .tls = tls, + }; + + return _do_fork(&args); +} + static void do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) { unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; - unsigned long uninitialized_var(ip); /* GCC be quiet */ + unsigned long ip; elf_greg_t *dst = arg; struct pt_regs *pt; char nat; @@ -516,51 +532,17 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void * } void -do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) -{ - elf_fpreg_t *dst = arg; - int i; - - memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */ - - if (unw_unwind_to_user(info) < 0) - return; - - /* f0 is 0.0, f1 is 1.0 */ - - for (i = 2; i < 32; ++i) - unw_get_fr(info, i, dst + i); - - ia64_flush_fph(task); - if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) - memcpy(dst + 32, task->thread.fph, 96*16); -} - -void do_copy_regs (struct unw_frame_info *info, void *arg) { do_copy_task_regs(current, info, arg); } void -do_dump_fpu (struct unw_frame_info *info, void *arg) -{ - do_dump_task_fpu(current, info, arg); -} - -void ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) { unw_init_running(do_copy_regs, dst); } -int -dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) -{ - unw_init_running(do_dump_fpu, dst); - return 1; /* f0-f31 are always valid so we always return 1 */ -} - /* * Flush thread state. This is called when a thread does an execve(). 
*/ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 82aaacf64583..33ca9fa0fbf5 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -1273,52 +1273,43 @@ struct regset_getset { int ret; }; +static const ptrdiff_t pt_offsets[32] = +{ +#define R(n) offsetof(struct pt_regs, r##n) + [0] = -1, R(1), R(2), R(3), + [4] = -1, [5] = -1, [6] = -1, [7] = -1, + R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15), + R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23), + R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31), +#undef R +}; + static int access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { - struct pt_regs *pt; - unsigned long *ptr = NULL; - int ret; - char nat = 0; + struct pt_regs *pt = task_pt_regs(target); + unsigned reg = addr / sizeof(unsigned long); + ptrdiff_t d = pt_offsets[reg]; - pt = task_pt_regs(target); - switch (addr) { - case ELF_GR_OFFSET(1): - ptr = &pt->r1; - break; - case ELF_GR_OFFSET(2): - case ELF_GR_OFFSET(3): - ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2)); - break; - case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7): + if (d >= 0) { + unsigned long *ptr = (void *)pt + d; + if (write_access) + *ptr = *data; + else + *data = *ptr; + return 0; + } else { + char nat = 0; if (write_access) { /* read NaT bit first: */ unsigned long dummy; - - ret = unw_get_gr(info, addr/8, &dummy, &nat); + int ret = unw_get_gr(info, reg, &dummy, &nat); if (ret < 0) return ret; } - return unw_access_gr(info, addr/8, data, &nat, write_access); - case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11): - ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8); - break; - case ELF_GR_OFFSET(12): - case ELF_GR_OFFSET(13): - ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12); - break; - case ELF_GR_OFFSET(14): - ptr = &pt->r14; - break; - case ELF_GR_OFFSET(15): - ptr = &pt->r15; + return unw_access_gr(info, reg, data, &nat, write_access); } - if (write_access) - *ptr = *data; - else - *data = *ptr; - return 0; } static int @@ -1490,7 +1481,7 @@ static int access_elf_reg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { - if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15)) + if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31)) return access_elf_gpreg(target, info, addr, data, write_access); else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) return access_elf_breg(target, info, addr, data, write_access); @@ -1498,12 +1489,17 @@ access_elf_reg(struct task_struct *target, struct unw_frame_info *info, return access_elf_areg(target, info, addr, data, write_access); } +struct regset_membuf { + struct membuf to; + int ret; +}; + void do_gpregs_get(struct unw_frame_info *info, void *arg) { - struct pt_regs *pt; - struct regset_getset *dst = arg; - elf_greg_t tmp[16]; - unsigned int i, index, min_copy; + struct regset_membuf *dst = arg; + struct membuf to = dst->to; + unsigned int n; + elf_greg_t reg; if (unw_unwind_to_user(info) < 0) return; @@ -1521,165 +1517,53 @@ void do_gpregs_get(struct unw_frame_info *info, void *arg) /* Skip r0 */ - if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { - dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, - &dst->u.get.kbuf, - &dst->u.get.ubuf, - 0, ELF_GR_OFFSET(1)); - if (dst->ret || dst->count == 0) + membuf_zero(&to, 8); + for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) { + if (access_elf_reg(info->task, info, n, ®, 0) < 0) { + dst->ret = -EIO; 
return; - } - - /* gr1 - gr15 */ - if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { - index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); - min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ? - (dst->pos + dst->count) : ELF_GR_OFFSET(16); - for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), - index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 0) < 0) { - dst->ret = -EIO; - return; - } - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, - ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); - if (dst->ret || dst->count == 0) - return; - } - - /* r16-r31 */ - if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { - pt = task_pt_regs(dst->target); - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16, - ELF_GR_OFFSET(16), ELF_NAT_OFFSET); - if (dst->ret || dst->count == 0) - return; - } - - /* nat, pr, b0 - b7 */ - if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { - index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); - min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ? - (dst->pos + dst->count) : ELF_CR_IIP_OFFSET; - for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), - index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 0) < 0) { - dst->ret = -EIO; - return; - } - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, - ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); - if (dst->ret || dst->count == 0) - return; - } - - /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat - * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd - */ - if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { - index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); - min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ? 
- (dst->pos + dst->count) : ELF_AR_END_OFFSET; - for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), - index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 0) < 0) { - dst->ret = -EIO; - return; - } - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, - ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); + } + membuf_store(&to, reg); } } void do_gpregs_set(struct unw_frame_info *info, void *arg) { - struct pt_regs *pt; struct regset_getset *dst = arg; - elf_greg_t tmp[16]; - unsigned int i, index; if (unw_unwind_to_user(info) < 0) return; + if (!dst->count) + return; /* Skip r0 */ - if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { + if (dst->pos < ELF_GR_OFFSET(1)) { dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, 0, ELF_GR_OFFSET(1)); - if (dst->ret || dst->count == 0) - return; - } - - /* gr1-gr15 */ - if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { - i = dst->pos; - index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, - ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); if (dst->ret) return; - for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 1) < 0) { - dst->ret = -EIO; - return; - } - if (dst->count == 0) - return; - } - - /* gr16-gr31 */ - if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { - pt = task_pt_regs(dst->target); - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16, - ELF_GR_OFFSET(16), ELF_NAT_OFFSET); - if (dst->ret || dst->count == 0) - return; } - /* nat, pr, b0 - b7 */ - if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { - i = dst->pos; - index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, - ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); - if (dst->ret) - return; - for (; i < dst->pos; i += sizeof(elf_greg_t), index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 1) < 0) { - dst->ret = -EIO; - return; - } - if (dst->count == 0) - return; - } + while (dst->count && dst->pos < ELF_AR_END_OFFSET) { + unsigned int n, from, to; + elf_greg_t tmp[16]; - /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat - * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd - */ - if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { - i = dst->pos; - index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); + from = dst->pos; + to = from + sizeof(tmp); + if (to > ELF_AR_END_OFFSET) + to = ELF_AR_END_OFFSET; + /* get up to 16 values */ dst->ret = user_regset_copyin(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, - ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); + from, to); if (dst->ret) return; - for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) - if (access_elf_reg(dst->target, info, i, - &tmp[index], 1) < 0) { + /* now copy them into registers */ + for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++) + if (access_elf_reg(dst->target, info, from, + &tmp[n], 1) < 0) { dst->ret = -EIO; return; } @@ -1690,60 +1574,36 @@ void do_gpregs_set(struct unw_frame_info *info, void *arg) void do_fpregs_get(struct unw_frame_info *info, void *arg) { - struct regset_getset *dst = arg; - struct task_struct *task = dst->target; - elf_fpreg_t tmp[30]; - int index, min_copy, i; + struct task_struct *task = info->task; + struct regset_membuf *dst = arg; + 
struct membuf to = dst->to; + elf_fpreg_t reg; + unsigned int n; if (unw_unwind_to_user(info) < 0) return; /* Skip pos 0 and 1 */ - if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { - dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, - &dst->u.get.kbuf, - &dst->u.get.ubuf, - 0, ELF_FP_OFFSET(2)); - if (dst->count == 0 || dst->ret) - return; - } + membuf_zero(&to, 2 * sizeof(elf_fpreg_t)); /* fr2-fr31 */ - if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { - index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t); - - min_copy = min(((unsigned int)ELF_FP_OFFSET(32)), - dst->pos + dst->count); - for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), - index++) - if (unw_get_fr(info, i / sizeof(elf_fpreg_t), - &tmp[index])) { - dst->ret = -EIO; - return; - } - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, - ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); - if (dst->count == 0 || dst->ret) + for (n = 2; to.left && n < 32; n++) { + if (unw_get_fr(info, n, ®)) { + dst->ret = -EIO; return; + } + membuf_write(&to, ®, sizeof(reg)); } /* fph */ - if (dst->count > 0) { - ia64_flush_fph(dst->target); - if (task->thread.flags & IA64_THREAD_FPH_VALID) - dst->ret = user_regset_copyout( - &dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, - &dst->target->thread.fph, - ELF_FP_OFFSET(32), -1); - else - /* Zero fill instead. */ - dst->ret = user_regset_copyout_zero( - &dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, - ELF_FP_OFFSET(32), -1); - } + if (!to.left) + return; + + ia64_flush_fph(task); + if (task->thread.flags & IA64_THREAD_FPH_VALID) + membuf_write(&to, &task->thread.fph, 96 * sizeof(reg)); + else + membuf_zero(&to, 96 * sizeof(reg)); } void do_fpregs_set(struct unw_frame_info *info, void *arg) @@ -1819,6 +1679,20 @@ void do_fpregs_set(struct unw_frame_info *info, void *arg) } } +static void +unwind_and_call(void (*call)(struct unw_frame_info *, void *), + struct task_struct *target, void *data) +{ + if (target == current) + unw_init_running(call, data); + else { + struct unw_frame_info info; + memset(&info, 0, sizeof(info)); + unw_init_from_blocked_task(&info, target); + (*call)(&info, data); + } +} + static int do_regset_call(void (*call)(struct unw_frame_info *, void *), struct task_struct *target, @@ -1830,27 +1704,18 @@ do_regset_call(void (*call)(struct unw_frame_info *, void *), .pos = pos, .count = count, .u.set = { .kbuf = kbuf, .ubuf = ubuf }, .ret = 0 }; - - if (target == current) - unw_init_running(call, &info); - else { - struct unw_frame_info ufi; - memset(&ufi, 0, sizeof(ufi)); - unw_init_from_blocked_task(&ufi, target); - (*call)(&ufi, &info); - } - + unwind_and_call(call, target, &info); return info.ret; } static int gpregs_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { - return do_regset_call(do_gpregs_get, target, regset, pos, count, - kbuf, ubuf); + struct regset_membuf info = {.to = to}; + unwind_and_call(do_gpregs_get, target, &info); + return info.ret; } static int gpregs_set(struct task_struct *target, @@ -1892,11 +1757,11 @@ fpregs_active(struct task_struct *target, const struct user_regset *regset) static int fpregs_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { - return do_regset_call(do_fpregs_get, target, regset, pos, count, - kbuf, ubuf); + struct 
regset_membuf info = {.to = to}; + unwind_and_call(do_fpregs_get, target, &info); + return info.ret; } static int fpregs_set(struct task_struct *target, @@ -1913,7 +1778,6 @@ access_uarea(struct task_struct *child, unsigned long addr, unsigned long *data, int write_access) { unsigned int pos = -1; /* an invalid value */ - int ret; unsigned long *ptr, regnum; if ((addr & 0x7) != 0) { @@ -1945,14 +1809,39 @@ access_uarea(struct task_struct *child, unsigned long addr, } if (pos != -1) { - if (write_access) - ret = fpregs_set(child, NULL, pos, - sizeof(unsigned long), data, NULL); - else - ret = fpregs_get(child, NULL, pos, - sizeof(unsigned long), data, NULL); - if (ret != 0) - return -1; + unsigned reg = pos / sizeof(elf_fpreg_t); + int which_half = (pos / sizeof(unsigned long)) & 1; + + if (reg < 32) { /* fr2-fr31 */ + struct unw_frame_info info; + elf_fpreg_t fpreg; + + memset(&info, 0, sizeof(info)); + unw_init_from_blocked_task(&info, child); + if (unw_unwind_to_user(&info) < 0) + return 0; + + if (unw_get_fr(&info, reg, &fpreg)) + return -1; + if (write_access) { + fpreg.u.bits[which_half] = *data; + if (unw_set_fr(&info, reg, fpreg)) + return -1; + } else { + *data = fpreg.u.bits[which_half]; + } + } else { /* fph */ + elf_fpreg_t *p = &child->thread.fph[reg - 32]; + unsigned long *bits = &p->u.bits[which_half]; + + ia64_sync_fph(child); + if (write_access) + *bits = *data; + else if (child->thread.flags & IA64_THREAD_FPH_VALID) + *data = *bits; + else + *data = 0; + } return 0; } @@ -2038,15 +1927,14 @@ access_uarea(struct task_struct *child, unsigned long addr, } if (pos != -1) { - if (write_access) - ret = gpregs_set(child, NULL, pos, - sizeof(unsigned long), data, NULL); - else - ret = gpregs_get(child, NULL, pos, - sizeof(unsigned long), data, NULL); - if (ret != 0) - return -1; - return 0; + struct unw_frame_info info; + + memset(&info, 0, sizeof(info)); + unw_init_from_blocked_task(&info, child); + if (unw_unwind_to_user(&info) < 0) + return 0; + + return access_elf_reg(child, &info, pos, data, write_access); } /* access debug registers */ @@ -2112,14 +2000,14 @@ static const struct user_regset native_regsets[] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), - .get = gpregs_get, .set = gpregs_set, + .regset_get = gpregs_get, .set = gpregs_set, .writeback = gpregs_writeback }, { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), - .get = fpregs_get, .set = fpregs_set, .active = fpregs_active + .regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active }, }; diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 1cf7b9b3c1e2..7b7b64eb3129 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -39,7 +39,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/page.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 016683b743c2..c29c600d7967 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -49,7 +49,6 @@ #include <asm/irq.h> #include <asm/mca.h> #include <asm/page.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 49e325b604b3..ced9c83e47c9 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ 
-356,6 +356,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index d7d31c718d2d..e30e360beef8 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -21,7 +21,6 @@ #include <linux/swap.h> #include <asm/meminit.h> -#include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/mca.h> diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index dd8284bcbf16..dbe829fc5298 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -24,7 +24,6 @@ #include <linux/efi.h> #include <linux/nodemask.h> #include <linux/slab.h> -#include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/meminit.h> #include <asm/numa.h> @@ -180,7 +179,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node) void __init setup_per_cpu_areas(void) { struct pcpu_alloc_info *ai; - struct pcpu_group_info *uninitialized_var(gi); + struct pcpu_group_info *gi; unsigned int *cpu_map; void *base; unsigned long base_offset; @@ -601,7 +600,6 @@ void __init paging_init(void) max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); #ifdef CONFIG_VIRTUAL_MEM_MAP @@ -656,7 +654,7 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_basepages(start, end, node); + return vmemmap_populate_basepages(start, end, node, NULL); } void vmemmap_free(unsigned long start, unsigned long end, diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 32352a73df0c..b331f94d20ac 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -18,7 +18,6 @@ #include <linux/sysctl.h> #include <linux/log2.h> #include <asm/mman.h> -#include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 72cc568bc841..135b5135cace 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -27,7 +27,6 @@ #include <asm/delay.h> #include <asm/mmu_context.h> -#include <asm/pgalloc.h> #include <asm/pal.h> #include <asm/tlbflush.h> #include <asm/dma.h> @@ -369,7 +368,7 @@ EXPORT_SYMBOL(flush_tlb_range); void ia64_tlb_init(void) { - ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ + ia64_ptce_info_t ptce_info; u64 tr_pgbits; long status; pal_vm_info_1_u_t vm_info_1; |
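Note: the ptrace.c hunks above convert gpregs_get()/fpregs_get() from the old pos/count/kbuf/ubuf callbacks to the newer ->regset_get() interface, which hands the callback a struct membuf. The sketch below illustrates that calling convention only; it is not taken from this patch. It assumes the generic membuf helpers from <linux/regset.h> (membuf_zero(), membuf_store(), and the .left counter), and example_regs_get()/example_reg() are hypothetical names used for illustration.

/*
 * Minimal sketch of a membuf-based regset getter (hypothetical, not part
 * of this patch).  The regset core sizes 'to' for the caller and copies
 * the filled buffer out itself, so the getter never touches user space.
 */
#include <linux/regset.h>
#include <linux/sched.h>

/* Hypothetical per-register accessor; real code reads pt_regs or unwinds. */
static unsigned long example_reg(struct task_struct *target, unsigned int n)
{
	return n;	/* placeholder value */
}

static int example_regs_get(struct task_struct *target,
			    const struct user_regset *regset,
			    struct membuf to)
{
	unsigned int n;

	/* slot 0 is always zero, like r0 in the ia64 gpregs getter */
	membuf_zero(&to, sizeof(unsigned long));

	/* emit one value per remaining slot until the buffer is exhausted */
	for (n = 1; to.left && n < regset->n; n++)
		membuf_store(&to, example_reg(target, n));

	return 0;
}

Wired into a struct user_regset this would be referenced as .regset_get = example_regs_get, matching the .regset_get assignments in the native_regsets[] hunk above.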