author     James Hogan <james.hogan@imgtec.com>  2013-02-11 21:28:10 +0400
committer  James Hogan <james.hogan@imgtec.com>  2013-03-03 00:11:13 +0400
commit     f75c28d896f4dd0064e60bba1e82a4c98908d239
tree       18097d57e812bdcd86634d86b6089fc07605946a
parent     c838e72a35e49ea51c39c2c634ece028fa49c565
download   linux-f75c28d896f4dd0064e60bba1e82a4c98908d239.tar.xz
metag: hugetlb: convert to vm_unmapped_area()
Convert hugetlb_get_unmapped_area_new_pmd() to use vm_unmapped_area()
rather than searching the virtual address space itself. This fixes the
following errors in linux-next due to the specified members being
removed after other architectures have already been converted:

arch/metag/mm/hugetlbpage.c: In function 'hugetlb_get_unmapped_area_new_pmd':
arch/metag/mm/hugetlbpage.c:199: error: 'struct mm_struct' has no member named 'cached_hole_size'
arch/metag/mm/hugetlbpage.c:200: error: 'struct mm_struct' has no member named 'free_area_cache'
arch/metag/mm/hugetlbpage.c:215: error: 'struct mm_struct' has no member named 'cached_hole_size'

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Michel Lespinasse <walken@google.com>
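For background, vm_unmapped_area() searches for a free gap described by a
struct vm_unmapped_area_info. The sketch below shows the general calling
convention of the 2013-era API this commit adopts; the wrapper name and the
align_mask parameter are illustrative, while the field names and the
TASK_UNMAPPED_BASE/TASK_SIZE limits match the code added in the hunk below:

#include <linux/mm.h>           /* vm_unmapped_area(), struct vm_unmapped_area_info */
#include <linux/sched.h>        /* TASK_UNMAPPED_BASE, TASK_SIZE */

/* Illustrative wrapper, not part of this commit: ask the core VM for a
 * free, aligned gap of 'len' bytes, searching bottom-up. */
static unsigned long example_unmapped_area(unsigned long len,
                                           unsigned long align_mask)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                 /* bottom-up; VM_UNMAPPED_AREA_TOPDOWN searches down */
        info.length = len;              /* size of the gap required */
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = align_mask;   /* address bits that must be clear in the result */
        info.align_offset = 0;          /* no offset within the alignment */
        return vm_unmapped_area(&info); /* start address, or -ENOMEM on failure */
}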
-rw-r--r--  arch/metag/mm/hugetlbpage.c  52
1 file changed, 10 insertions(+), 42 deletions(-)
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 24ceed4f4eed..3c52fa6d0f8e 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -192,43 +192,15 @@ new_search:
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma;
-        unsigned long start_addr, addr;
-
-        if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
-                start_addr = mm->free_area_cache;
-        else
-                start_addr = TASK_UNMAPPED_BASE;
-
-new_search:
-        addr = ALIGN_HUGEPT(start_addr);
-
-        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-                if (TASK_SIZE - len < addr) {
-                        /*
-                         * Start a new search - just in case we missed
-                         * some holes.
-                         */
-                        if (start_addr != TASK_UNMAPPED_BASE) {
-                                start_addr = TASK_UNMAPPED_BASE;
-                                mm->cached_hole_size = 0;
-                                goto new_search;
-                        }
-                        return 0;
-                }
-                /* skip ahead if we've aligned right over some vmas */
-                if (vma && vma->vm_end <= addr)
-                        continue;
-                if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
-#if HPAGE_SHIFT < HUGEPT_SHIFT
-                        if (len & HUGEPT_MASK)
-                                mm->context.part_huge = addr + len;
-#endif
-                        return addr;
-                }
-                addr = ALIGN_HUGEPT(vma->vm_end);
-        }
+        struct vm_unmapped_area_info info;
+
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = TASK_UNMAPPED_BASE;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = PAGE_MASK & HUGEPT_MASK;
+        info.align_offset = 0;
+        return vm_unmapped_area(&info);
}
unsigned long
@@ -266,11 +238,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         * Find an unmapped naturally aligned set of 4MB blocks that we can use
         * for huge pages.
         */
-        addr = hugetlb_get_unmapped_area_new_pmd(len);
-        if (likely(addr))
-                return addr;
-
-        return -EINVAL;
+        return hugetlb_get_unmapped_area_new_pmd(len);
}
#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
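A note on the second hunk: the explicit zero check and the -EINVAL fallback
can go because vm_unmapped_area() signals failure as a negative errno (such
as -ENOMEM) encoded in the returned unsigned long, never as 0, so the result
can be returned directly. A hypothetical caller (for illustration only;
hugetlb_get_unmapped_area_new_pmd() is static to this file) could tell the
two cases apart with IS_ERR_VALUE():

#include <linux/err.h>  /* IS_ERR_VALUE() */

/* Hypothetical helper, not part of this commit. */
static int map_huge_region(unsigned long len)
{
        unsigned long addr = hugetlb_get_unmapped_area_new_pmd(len);

        if (IS_ERR_VALUE(addr))
                return (int)addr;       /* e.g. -ENOMEM, propagated unchanged */

        /* ... 'addr' is a suitably aligned address; go on to map it ... */
        return 0;
}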