Diffstat (limited to 'arch/tile')
-rw-r--r--   arch/tile/include/asm/Kbuild            |   1
-rw-r--r--   arch/tile/include/asm/compat.h          |   2
-rw-r--r--   arch/tile/include/asm/dma-mapping.h     |   1
-rw-r--r--   arch/tile/include/asm/elf.h             |   2
-rw-r--r--   arch/tile/include/asm/ptrace.h          |   3
-rw-r--r--   arch/tile/include/asm/unistd.h          |   1
-rw-r--r--   arch/tile/include/uapi/asm/ptrace.h     |   8
-rw-r--r--   arch/tile/kernel/compat.c               |  18
-rw-r--r--   arch/tile/kernel/module.c               |   2
-rw-r--r--   arch/tile/kernel/pci.c                  |   4
-rw-r--r--   arch/tile/kernel/pci_gx.c               |   3
-rw-r--r--   arch/tile/kernel/ptrace.c               | 140
-rw-r--r--   arch/tile/mm/hugetlbpage.c              | 139
13 files changed, 154 insertions(+), 170 deletions(-)
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 6948015e08a2..b17b9b8e53cd 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -34,5 +34,6 @@ generic-y += sockios.h
 generic-y += statfs.h
 generic-y += termbits.h
 generic-y += termios.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += xor.h
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index ca61fb4296b3..88f3c227afd9 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -296,8 +296,6 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
 long compat_sys_fallocate(int fd, int mode,
                           u32 offset_lo, u32 offset_hi,
                           u32 len_lo, u32 len_hi);
-long compat_sys_sched_rr_get_interval(compat_pid_t pid,
-                                      struct compat_timespec __user *interval);
 
 /* Assembly trampoline to avoid clobbering r0. */
 long _compat_sys_rt_sigreturn(void);
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 4b6247d1a315..f2ff191376b4 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -72,6 +72,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 static inline int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
+        debug_dma_mapping_error(dev, dma_addr);
         return get_dma_ops(dev)->mapping_error(dev, dma_addr);
 }
 
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index b73e1039c911..ff8a93408823 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -170,4 +170,6 @@ do { \
 
 #endif /* CONFIG_COMPAT */
 
+#define CORE_DUMP_USE_REGSET
+
 #endif /* _ASM_TILE_ELF_H */
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 3f792b6d60d5..2e83fc1b9467 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -24,8 +24,7 @@ typedef unsigned long pt_reg_t;
 #include <uapi/asm/ptrace.h>
 
 #define PTRACE_O_MASK_TILE      (PTRACE_O_TRACEMIGRATE)
-#define PT_TRACE_MIGRATE        0x00080000
-#define PT_TRACE_MASK_TILE      (PT_TRACE_MIGRATE)
+#define PT_TRACE_MIGRATE        PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)
 
 /* Flag bits in pt_regs.flags */
 #define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index 940831fe9e94..6ac21034f69a 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -14,6 +14,7 @@
 /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
 #endif
 #define __ARCH_WANT_SYS_NEWFSTATAT
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
index c717d0fec72e..7757e1985fb6 100644
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ b/arch/tile/include/uapi/asm/ptrace.h
@@ -81,8 +81,14 @@ struct pt_regs {
 #define PTRACE_SETFPREGS        15
 
 /* Support TILE-specific ptrace options, with events starting at 16. */
-#define PTRACE_O_TRACEMIGRATE   0x00010000
 #define PTRACE_EVENT_MIGRATE    16
+#define PTRACE_O_TRACEMIGRATE   (1 << PTRACE_EVENT_MIGRATE)
+
+/*
+ * Flag bits in pt_regs.flags that are part of the ptrace API.
+ * We start our numbering higher up to avoid confusion with the
+ * non-ABI kernel-internal values that use the low 16 bits.
+ */
+#define PT_FLAGS_COMPAT         0x10000  /* process is an -m32 compat process */
 
 #endif /* _UAPI_ASM_TILE_PTRACE_H */
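
With PTRACE_O_TRACEMIGRATE now defined as a real option bit (1 << PTRACE_EVENT_MIGRATE), a debugger can request migration reports through the ordinary PTRACE_SETOPTIONS path rather than an arch-private flag word. The sketch below is a hedged userspace illustration only: it assumes the migrate event is delivered with the generic encoding used for option-driven ptrace events (SIGTRAP with the event number in bits 8-15 of the wait status), and it mirrors the two constants from the header above instead of relying on installed system headers.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Mirrors the tile uapi header above; not present in stock libc headers. */
#define PTRACE_EVENT_MIGRATE    16
#define PTRACE_O_TRACEMIGRATE   (1 << PTRACE_EVENT_MIGRATE)

/* Resume a stopped tracee and report each migration event until it exits. */
static void trace_migrations(pid_t child)
{
        int status;

        waitpid(child, &status, 0);     /* initial stop after PTRACE_TRACEME/execve */
        ptrace(PTRACE_SETOPTIONS, child, 0, (void *)PTRACE_O_TRACEMIGRATE);

        for (;;) {
                ptrace(PTRACE_CONT, child, 0, 0);
                if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
                        break;
                /* Option-driven events arrive as SIGTRAP | (event << 8). */
                if (WIFSTOPPED(status) &&
                    status >> 8 == (SIGTRAP | (PTRACE_EVENT_MIGRATE << 8)))
                        printf("pid %d migrated to another cpu\n", (int)child);
        }
}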
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 9cd7cb6041c0..7f72401b4f45 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -76,24 +76,6 @@ long compat_sys_fallocate(int fd, int mode,
                              ((loff_t)len_hi << 32) | len_lo);
 }
 
-
-
-long compat_sys_sched_rr_get_interval(compat_pid_t pid,
-                                      struct compat_timespec __user *interval)
-{
-        struct timespec t;
-        int ret;
-        mm_segment_t old_fs = get_fs();
-
-        set_fs(KERNEL_DS);
-        ret = sys_sched_rr_get_interval(pid,
-                                        (struct timespec __force __user *)&t);
-        set_fs(old_fs);
-        if (put_compat_timespec(&t, interval))
-                return -EFAULT;
-        return ret;
-}
-
 /* Provide the compat syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 243ffebe38d6..4918d91bc3a6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -42,8 +42,6 @@ void *module_alloc(unsigned long size)
         int i = 0;
         int npages;
 
-        if (size == 0)
-                return NULL;
         npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
         pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
         if (pages == NULL)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 759822687e8f..aac1cd586966 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
         u16 new_values;
 
         /* Scan for the smallest maximum payload size. */
-        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+        for_each_pci_dev(dev) {
                 u32 devcap;
                 int max_payload;
 
@@ -260,7 +260,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
 
         /* Now, set the max_payload_size for all devices to that value. */
         new_values = (max_read_size << 12) | (smallest_max_payload << 5);
-        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
+        for_each_pci_dev(dev)
                 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                                 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
                                 new_values);
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 2ba6d052f85d..94810d4a6332 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1047,8 +1047,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 }
 
 /* Called for each device after PCI setup is done. */
-static void __init
-pcibios_fixup_final(struct pci_dev *pdev)
+static void pcibios_fixup_final(struct pci_dev *pdev)
 {
         set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
         set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
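
The pci.c hunks above are a pure readability cleanup: for_each_pci_dev() is the stock helper from include/linux/pci.h and expands to the same pci_get_device(PCI_ANY_ID, PCI_ANY_ID, ...) loop that was open-coded before, so the reference-counting behaviour is unchanged (each iteration drops the reference on the previous device and returns the next one with a reference held). Paraphrased:

/* From include/linux/pci.h (paraphrased): iterate over every PCI device,
 * starting from a NULL cursor; pci_get_device() releases the previous
 * device reference and returns the next device with a reference taken. */
#define for_each_pci_dev(d) \
        while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)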
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e92e40527d6d..9835312d5a91 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -19,7 +19,10 @@
 #include <linux/kprobes.h>
 #include <linux/compat.h>
 #include <linux/uaccess.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
 #include <asm/traps.h>
+#include <arch/chip.h>
 
 void user_enable_single_step(struct task_struct *child)
 {
@@ -45,6 +48,100 @@ void ptrace_disable(struct task_struct *child)
         clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 }
 
+/*
+ * Get registers from task and ready the result for userspace.
+ * Note that we localize the API issues to getregs() and putregs() at
+ * some cost in performance, e.g. we need a full pt_regs copy for
+ * PEEKUSR, and two copies for POKEUSR.  But in general we expect
+ * GETREGS/PUTREGS to be the API of choice anyway.
+ */
+static char *getregs(struct task_struct *child, struct pt_regs *uregs)
+{
+        *uregs = *task_pt_regs(child);
+
+        /* Set up flags ABI bits. */
+        uregs->flags = 0;
+#ifdef CONFIG_COMPAT
+        if (task_thread_info(child)->status & TS_COMPAT)
+                uregs->flags |= PT_FLAGS_COMPAT;
+#endif
+
+        return (char *)uregs;
+}
+
+/* Put registers back to task. */
+static void putregs(struct task_struct *child, struct pt_regs *uregs)
+{
+        struct pt_regs *regs = task_pt_regs(child);
+
+        /* Don't allow overwriting the kernel-internal flags word. */
+        uregs->flags = regs->flags;
+
+        /* Only allow setting the ICS bit in the ex1 word. */
+        uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
+
+        *regs = *uregs;
+}
+
+enum tile_regset {
+        REGSET_GPR,
+};
+
+static int tile_gpr_get(struct task_struct *target,
+                        const struct user_regset *regset,
+                        unsigned int pos, unsigned int count,
+                        void *kbuf, void __user *ubuf)
+{
+        struct pt_regs regs;
+
+        getregs(target, &regs);
+
+        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
+                                   sizeof(regs));
+}
+
+static int tile_gpr_set(struct task_struct *target,
+                        const struct user_regset *regset,
+                        unsigned int pos, unsigned int count,
+                        const void *kbuf, const void __user *ubuf)
+{
+        int ret;
+        struct pt_regs regs;
+
+        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
+                                 sizeof(regs));
+        if (ret)
+                return ret;
+
+        putregs(target, &regs);
+
+        return 0;
+}
+
+static const struct user_regset tile_user_regset[] = {
+        [REGSET_GPR] = {
+                .core_note_type = NT_PRSTATUS,
+                .n = ELF_NGREG,
+                .size = sizeof(elf_greg_t),
+                .align = sizeof(elf_greg_t),
+                .get = tile_gpr_get,
+                .set = tile_gpr_set,
+        },
+};
+
+static const struct user_regset_view tile_user_regset_view = {
+        .name = CHIP_ARCH_NAME,
+        .e_machine = ELF_ARCH,
+        .ei_osabi = ELF_OSABI,
+        .regsets = tile_user_regset,
+        .n = ARRAY_SIZE(tile_user_regset),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+        return &tile_user_regset_view;
+}
+
 long arch_ptrace(struct task_struct *child, long request,
                  unsigned long addr, unsigned long data)
 {
@@ -53,14 +150,13 @@ long arch_ptrace(struct task_struct *child, long request,
         long ret = -EIO;
         char *childreg;
         struct pt_regs copyregs;
-        int ex1_offset;
 
         switch (request) {
 
         case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
                 if (addr >= PTREGS_SIZE)
                         break;
-                childreg = (char *)task_pt_regs(child) + addr;
+                childreg = getregs(child, &copyregs) + addr;
 #ifdef CONFIG_COMPAT
                 if (is_compat_task()) {
                         if (addr & (sizeof(compat_long_t)-1))
@@ -79,17 +175,7 @@ long arch_ptrace(struct task_struct *child, long request,
         case PTRACE_POKEUSR:  /* Write register in pt_regs. */
                 if (addr >= PTREGS_SIZE)
                         break;
-                childreg = (char *)task_pt_regs(child) + addr;
-
-                /* Guard against overwrites of the privilege level. */
-                ex1_offset = PTREGS_OFFSET_EX1;
-#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
-                if (is_compat_task()) /* point at low word */
-                        ex1_offset += sizeof(compat_long_t);
-#endif
-                if (addr == ex1_offset)
-                        data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
-
+                childreg = getregs(child, &copyregs) + addr;
 #ifdef CONFIG_COMPAT
                 if (is_compat_task()) {
                         if (addr & (sizeof(compat_long_t)-1))
@@ -102,24 +188,20 @@
                                 break;
                         *(long *)childreg = data;
                 }
+                putregs(child, &copyregs);
                 ret = 0;
                 break;
 
         case PTRACE_GETREGS:  /* Get all registers from the child. */
-                if (copy_to_user(datap, task_pt_regs(child),
-                                 sizeof(struct pt_regs)) == 0) {
-                        ret = 0;
-                }
+                ret = copy_regset_to_user(child, &tile_user_regset_view,
+                                          REGSET_GPR, 0,
+                                          sizeof(struct pt_regs), datap);
                 break;
 
         case PTRACE_SETREGS:  /* Set all registers in the child. */
-                if (copy_from_user(&copyregs, datap,
-                                   sizeof(struct pt_regs)) == 0) {
-                        copyregs.ex1 =
-                                PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
-                        *task_pt_regs(child) = copyregs;
-                        ret = 0;
-                }
+                ret = copy_regset_from_user(child, &tile_user_regset_view,
+                                            REGSET_GPR, 0,
+                                            sizeof(struct pt_regs), datap);
                 break;
 
         case PTRACE_GETFPREGS:  /* Get the child FPU state. */
@@ -128,12 +210,16 @@
 
         case PTRACE_SETOPTIONS:
                 /* Support TILE-specific ptrace options. */
-                child->ptrace &= ~PT_TRACE_MASK_TILE;
+                BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
                 tmp = data & PTRACE_O_MASK_TILE;
                 data &= ~PTRACE_O_MASK_TILE;
                 ret = ptrace_request(child, request, addr, data);
-                if (tmp & PTRACE_O_TRACEMIGRATE)
-                        child->ptrace |= PT_TRACE_MIGRATE;
+                if (ret == 0) {
+                        unsigned int flags = child->ptrace;
+                        flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
+                        flags |= (tmp << PT_OPT_FLAG_SHIFT);
+                        child->ptrace = flags;
+                }
                 break;
 
         default:
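
Beyond funnelling PTRACE_GETREGS/SETREGS and core dumps (CORE_DUMP_USE_REGSET above) through the one sanitizing getregs()/putregs() pair, exporting a user_regset_view is also what the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests resolve NT_PRSTATUS through (via task_user_regset_view(), on architectures that select HAVE_ARCH_TRACEHOOK). A hedged userspace sketch that reads the regset of an already-stopped tracee without assuming the pt_regs layout; the kernel writes the actual size back into iov_len:

#include <elf.h>                /* NT_PRSTATUS */
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>            /* struct iovec */

/* Read the general-purpose register set of an already-stopped tracee. */
static long read_gregs(pid_t child)
{
        unsigned long buf[64];  /* generously sized; the kernel trims iov_len */
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        long ret;

        memset(buf, 0, sizeof(buf));
        ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_PRSTATUS, &iov);
        if (ret == 0)
                printf("regset is %zu bytes, first word %#lx\n",
                       iov.iov_len, buf[0]);
        return ret;
}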
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 812e2d037972..650ccff8378c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                 unsigned long pgoff, unsigned long flags)
 {
         struct hstate *h = hstate_file(file);
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma;
-        unsigned long start_addr;
-
-        if (len > mm->cached_hole_size) {
-                start_addr = mm->free_area_cache;
-        } else {
-                start_addr = TASK_UNMAPPED_BASE;
-                mm->cached_hole_size = 0;
-        }
-
-full_search:
-        addr = ALIGN(start_addr, huge_page_size(h));
-
-        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-                /* At this point:  (!vma || addr < vma->vm_end). */
-                if (TASK_SIZE - len < addr) {
-                        /*
-                         * Start a new search - just in case we missed
-                         * some holes.
-                         */
-                        if (start_addr != TASK_UNMAPPED_BASE) {
-                                start_addr = TASK_UNMAPPED_BASE;
-                                mm->cached_hole_size = 0;
-                                goto full_search;
-                        }
-                        return -ENOMEM;
-                }
-                if (!vma || addr + len <= vma->vm_start) {
-                        mm->free_area_cache = addr + len;
-                        return addr;
-                }
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-                addr = ALIGN(vma->vm_end, huge_page_size(h));
-        }
+        struct vm_unmapped_area_info info;
+
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = TASK_UNMAPPED_BASE;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+        info.align_offset = 0;
+        return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                 unsigned long pgoff, unsigned long flags)
 {
         struct hstate *h = hstate_file(file);
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma, *prev_vma;
-        unsigned long base = mm->mmap_base, addr = addr0;
-        unsigned long largest_hole = mm->cached_hole_size;
-        int first_time = 1;
-
-        /* don't allow allocations above current base */
-        if (mm->free_area_cache > base)
-                mm->free_area_cache = base;
-
-        if (len <= largest_hole) {
-                largest_hole = 0;
-                mm->free_area_cache = base;
-        }
-try_again:
-        /* make sure it can fit in the remaining address space */
-        if (mm->free_area_cache < len)
-                goto fail;
-
-        /* either no address requested or can't fit in requested address hole */
-        addr = (mm->free_area_cache - len) & huge_page_mask(h);
-        do {
-                /*
-                 * Lookup failure means no vma is above this address,
-                 * i.e. return with success:
-                 */
-                vma = find_vma_prev(mm, addr, &prev_vma);
-                if (!vma) {
-                        return addr;
-                        break;
-                }
-
-                /*
-                 * new region fits between prev_vma->vm_end and
-                 * vma->vm_start, use it:
-                 */
-                if (addr + len <= vma->vm_start &&
-                    (!prev_vma || (addr >= prev_vma->vm_end))) {
-                        /* remember the address as a hint for next time */
-                        mm->cached_hole_size = largest_hole;
-                        mm->free_area_cache = addr;
-                        return addr;
-                } else {
-                        /* pull free_area_cache down to the first hole */
-                        if (mm->free_area_cache == vma->vm_end) {
-                                mm->free_area_cache = vma->vm_start;
-                                mm->cached_hole_size = largest_hole;
-                        }
-                }
+        struct vm_unmapped_area_info info;
+        unsigned long addr;
 
-                /* remember the largest hole we saw so far */
-                if (addr + largest_hole < vma->vm_start)
-                        largest_hole = vma->vm_start - addr;
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.low_limit = PAGE_SIZE;
+        info.high_limit = current->mm->mmap_base;
+        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+        info.align_offset = 0;
+        addr = vm_unmapped_area(&info);
 
-                /* try just below the current vma->vm_start */
-                addr = (vma->vm_start - len) & huge_page_mask(h);
-
-        } while (len <= vma->vm_start);
-
-fail:
-        /*
-         * if hint left us with no space for the requested
-         * mapping then try again:
-         */
-        if (first_time) {
-                mm->free_area_cache = base;
-                largest_hole = 0;
-                first_time = 0;
-                goto try_again;
-        }
         /*
          * A failed mmap() very likely causes application failure,
          * so fall back to the bottom-up function here.  This scenario
          * can happen with large stack limits and large mmap()
          * allocations.
          */
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
-        mm->cached_hole_size = ~0UL;
-        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-                        len, pgoff, flags);
-
-        /*
-         * Restore the topdown base:
-         */
-        mm->free_area_cache = base;
-        mm->cached_hole_size = ~0UL;
+        if (addr & ~PAGE_MASK) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.flags = 0;
+                info.low_limit = TASK_UNMAPPED_BASE;
+                info.high_limit = TASK_SIZE;
+                addr = vm_unmapped_area(&info);
+        }
 
         return addr;
 }
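
One detail worth calling out in the hugetlbpage.c conversion: huge-page alignment is now carried entirely by info.align_mask. vm_unmapped_area() only returns addresses whose bits under align_mask match align_offset (zero here), so masking PAGE_MASK with ~huge_page_mask(h) selects exactly the bits between the base-page size and the huge-page size and forces a huge-page-aligned result. A small worked example, assuming 64 KB base pages and 16 MB huge pages (both sizes are assumptions for illustration, not taken from this diff):

#include <stdio.h>

int main(void)
{
        /* Assumed sizes: 64 KB base pages, 16 MB huge pages. */
        unsigned long page_size  = 64UL << 10;
        unsigned long hpage_size = 16UL << 20;
        unsigned long page_mask  = ~(page_size - 1);
        unsigned long hpage_mask = ~(hpage_size - 1);

        /* info.align_mask as computed in hugetlb_get_unmapped_area_*() */
        unsigned long align_mask = page_mask & ~hpage_mask;

        /* Prints 0xff0000: bits 16..23 must be zero, so the returned address
         * is 16 MB aligned (bits below 16 are already zero for page-aligned
         * addresses). */
        printf("align_mask = %#lx\n", align_mask);
        return 0;
}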